source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
pixel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP IIIII X X EEEEE L %
% P P I X X E L %
% PPPP I X EEE L %
% P I X X E L %
% P IIIII X X EEEEE LLLLL %
% %
% MagickCore Methods to Import/Export Pixels %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelChannelMap() acquires a pixel component map.
%
% The format of the AcquirePixelChannelMap() method is:
%
% PixelChannelMap *AcquirePixelChannelMap(void)
%
*/
/*
  Allocate a zero-initialized map with one entry per possible pixel channel;
  each entry's channel field is preset to its own index.  Allocation failure
  raises a fatal exception, so the return is never NULL.
*/
MagickExport PixelChannelMap *AcquirePixelChannelMap(void)
{
  PixelChannelMap
    *map;

  ssize_t
    n;

  map=(PixelChannelMap *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*map));
  if (map == (PixelChannelMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(map,0,MaxPixelChannels*sizeof(*map));
  for (n=0; n < MaxPixelChannels; n++)
    map[n].channel=(PixelChannel) n;
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelChannelMap() clones a pixel component map.
%
% The format of the ClonePixelChannelMap() method is:
%
% PixelChannelMap *ClonePixelChannelMap(PixelChannelMap *channel_map)
%
% A description of each parameter follows:
%
% o channel_map: the pixel component map.
%
*/
/*
  Return a freshly allocated copy of channel_map (which must not be NULL).
*/
MagickExport PixelChannelMap *ClonePixelChannelMap(PixelChannelMap *channel_map)
{
  PixelChannelMap
    *clone;

  assert(channel_map != (PixelChannelMap *) NULL);
  clone=AcquirePixelChannelMap();
  if (clone != (PixelChannelMap *) NULL)
    (void) memcpy(clone,channel_map,MaxPixelChannels*sizeof(*clone));
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelInfo() makes a duplicate of the given pixel info structure.  The
% pixel parameter must not be NULL: it is dereferenced to initialize the copy.
%
% The format of the ClonePixelInfo method is:
%
% PixelInfo *ClonePixelInfo(const PixelInfo *pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel info.
%
*/
/*
  Heap-allocate a PixelInfo and initialize it by structure assignment from
  *pixel.  The pixel argument is dereferenced unconditionally, so it must not
  be NULL.  Allocation failure raises a fatal exception.
*/
MagickExport PixelInfo *ClonePixelInfo(const PixelInfo *pixel)
{
  PixelInfo
    *clone;

  clone=(PixelInfo *) AcquireMagickMemory(sizeof(*clone));
  if (clone == (PixelInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *clone=(*pixel);
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n f o r m P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConformPixelInfo() ensures the pixel conforms with the colorspace and alpha
% attribute of the image.
%
% The format of the ConformPixelInfo method is:
%
% void ConformPixelInfo(Image *image,const PixelInfo *source,
% PixelInfo *destination,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source pixel info.
%
% o destination: the destination pixel info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ConformPixelInfo() copies *source into *destination and then adjusts the
  copy (and, where required, the image itself) so the pixel agrees with the
  image's colorspace and alpha trait.

  Fixes over the prior revision: assert that source is non-NULL (it is
  dereferenced on the first statement), and drop the mismatched const cast in
  the destination assert (destination is a plain PixelInfo *).
*/
MagickExport void ConformPixelInfo(Image *image,const PixelInfo *source,
  PixelInfo *destination,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(source != (const PixelInfo *) NULL);
  assert(destination != (PixelInfo *) NULL);
  *destination=(*source);
  if (image->colorspace == CMYKColorspace)
    {
      /* Image is CMYK: convert an RGB-compatible pixel to CMYK. */
      if (IssRGBCompatibleColorspace(destination->colorspace) != MagickFalse)
        ConvertRGBToCMYK(destination);
    }
  else
    if (destination->colorspace == CMYKColorspace)
      {
        /* Pixel is CMYK but the image is not: convert it back to RGB. */
        if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
          ConvertCMYKToRGB(destination);
      }
  if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  if ((destination->alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e P i x e l G a m m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodePixelGamma() applies the expansive power-law nonlinearity to the pixel.
%
% The format of the DecodePixelGamma method is:
%
% MagickRealType DecodePixelGamma(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
/*
  Evaluate x^2.4 as x*x^(7/5): split x into mantissa and exponent with
  frexp(), approximate m^(7/5) on the mantissa with a 9-term Chebyshev
  series, then restore the exponent via ldexp() and a small table of
  (2^k)^(7/5) values.
*/
static inline double DecodeGamma(const double x)
{
  static const double
    coefficient[] =  /* Chebyshev terms for x^(7/5), expanded about x=1.5 */
    {
      1.7917488588043277509,
      0.82045614371976854984,
      0.027694100686325412819,
      -0.00094244335181762134018,
      0.000064355540911469709545,
      -5.7224404636060757485e-06,
      5.8767669437311184313e-07,
      -6.6139920053589721168e-08,
      7.9323242696227458163e-09
    };

  static const double
    powers_of_two[] =  /* (2^k)^(7/5) for k=0..4 */
    {
      1.0,
      2.6390158215457883983,
      6.9644045063689921093,
      1.8379173679952558018e+01,
      4.8502930128332728543e+01
    };

  div_t
    quotient;

  double
    sum,
    term[9];

  int
    exponent,
    i;

  term[0]=1.0;
  term[1]=4.0*frexp(x,&exponent)-3.0;  /* map mantissa [0.5,1) onto [-1,1) */
  for (i=2; i < 9; i++)
    term[i]=2.0*term[1]*term[i-1]-term[i-2];  /* Chebyshev recurrence */
  sum=0.0;
  for (i=0; i < 9; i++)
    sum+=coefficient[i]*term[i];
  quotient=div(exponent-1,5);
  if (quotient.rem < 0)
    {
      quotient.quot--;
      quotient.rem+=5;
    }
  return(x*ldexp(powers_of_two[quotient.rem]*sum,7*quotient.quot));
}
/*
  sRGB electro-optical transfer function: a linear segment below the
  threshold, the ((v+0.055)/1.055)^2.4 power curve above it, scaled to the
  quantum range.
*/
MagickExport MagickRealType DecodePixelGamma(const MagickRealType pixel)
{
  double
    v;

  if (pixel <= (0.0404482362771076*QuantumRange))
    return(pixel/12.92f);
  v=(double) (QuantumScale*pixel+0.055)/1.055;
  return((MagickRealType) (QuantumRange*DecodeGamma(v)));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelChannelMap() deallocates memory associated with the pixel
% channel map.
%
% The format of the DestroyPixelChannelMap() method is:
%
% PixelChannelMap *DestroyPixelChannelMap(PixelChannelMap *channel_map)
%
% A description of each parameter follows:
%
% o channel_map: the pixel component map.
%
*/
/*
  DestroyPixelChannelMap() frees the pixel channel map and returns NULL.

  Fix: the prior revision called RelinquishMagickMemory() twice; the second
  call operated on the NULL returned by the first, so it was a confusing
  no-op.  One call is sufficient and equivalent.
*/
MagickExport PixelChannelMap *DestroyPixelChannelMap(
  PixelChannelMap *channel_map)
{
  assert(channel_map != (PixelChannelMap *) NULL);
  return((PixelChannelMap *) RelinquishMagickMemory(channel_map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E n c o d e P i x e l G a m m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodePixelGamma() cancels any nonlinearity in the pixel.
%
% The format of the EncodePixelGamma method is:
%
% MagickRealType EncodePixelGamma(const MagickRealType pixel)
%
% A description of each parameter follows:
%
% o pixel: the pixel.
%
*/
/*
  Evaluate x^(1/2.4) == x^(5/12): split x into mantissa and exponent with
  frexp(), approximate m^(5/12) on the mantissa with a 9-term Chebyshev
  series, then restore the exponent via ldexp() and a table of (2^k)^(5/12)
  values.
*/
static inline double EncodeGamma(const double x)
{
  static const double
    coefficient[] =  /* Chebyshev terms for x^(5/12), expanded about x=1.5 */
    {
      1.1758200232996901923,
      0.16665763094889061230,
      -0.0083154894939042125035,
      0.00075187976780420279038,
      -0.000083240178519391795367,
      0.000010229209410070008679,
      -1.3400466409860246e-06,
      1.8333422241635376682e-07,
      -2.5878596761348859722e-08
    };

  static const double
    powers_of_two[] =  /* (2^k)^(5/12) for k=0..11 */
    {
      1.0,
      1.3348398541700343678,
      1.7817974362806785482,
      2.3784142300054420538,
      3.1748021039363991669,
      4.2378523774371812394,
      5.6568542494923805819,
      7.5509945014535482244,
      1.0079368399158985525e1,
      1.3454342644059433809e1,
      1.7959392772949968275e1,
      2.3972913230026907883e1
    };

  div_t
    quotient;

  double
    sum,
    term[9];

  int
    exponent,
    i;

  term[0]=1.0;
  term[1]=4.0*frexp(x,&exponent)-3.0;  /* map mantissa [0.5,1) onto [-1,1) */
  for (i=2; i < 9; i++)
    term[i]=2.0*term[1]*term[i-1]-term[i-2];  /* Chebyshev recurrence */
  sum=0.0;
  for (i=0; i < 9; i++)
    sum+=coefficient[i]*term[i];
  quotient=div(exponent-1,12);
  if (quotient.rem < 0)
    {
      quotient.quot--;
      quotient.rem+=12;
    }
  return(ldexp(powers_of_two[quotient.rem]*sum,5*quotient.quot));
}
/*
  Inverse sRGB transfer function: a linear segment below the threshold, the
  1.055*v^(1/2.4)-0.055 power curve above it, scaled to the quantum range.
*/
MagickExport MagickRealType EncodePixelGamma(const MagickRealType pixel)
{
  double
    v;

  if (pixel <= (0.0031306684425005883*QuantumRange))
    return(12.92f*pixel);
  v=1.055*EncodeGamma((double) QuantumScale*pixel)-0.055;
  return((MagickRealType) QuantumRange*v);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x p o r t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExportImagePixels() extracts pixel data from an image and returns it to you.
% The method returns MagickTrue on success otherwise MagickFalse if an error is
% encountered. The data is returned as char, short int, Quantum, unsigned int,
% unsigned long long, float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% ExportImagePixels(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
% The format of the ExportImagePixels method is:
%
% MagickBooleanType ExportImagePixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% const char *map,const StorageType type,void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,width,height: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel (char *), DoublePixel (double *), FloatPixel (float *),
% LongPixel (unsigned int *), LongLongPixel (unsigned long long *),
% QuantumPixel (Quantum *), or ShortPixel (unsigned short *).
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ExportCharPixel() copies the pixels of the region roi into the caller-
  supplied buffer as unsigned char values, in the channel order named by map.
  The common orderings BGR, BGRA, BGRP, I, RGB, RGBA, and RGBP take unrolled
  fast paths; any other map falls through to a generic loop driven by
  quantum_map (one entry per character of map).  A pad channel (P) always
  exports 0.  Returns MagickTrue if every requested row was read, MagickFalse
  if GetVirtualPixels() failed on some row.
*/
static MagickBooleanType ExportCharPixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;

  ssize_t
    x;

  unsigned char
    *magick_restrict q;

  size_t
    length;

  ssize_t
    y;

  q=(unsigned char *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar((Quantum) 0);  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(image,p)));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToChar(GetPixelRed(image,p));
          *q++=ScaleQuantumToChar(GetPixelGreen(image,p));
          *q++=ScaleQuantumToChar(GetPixelBlue(image,p));
          *q++=ScaleQuantumToChar((Quantum) 0);  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: decode each pixel one quantum_map entry at a time.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* default (e.g. pad, unknown quantum) exports 0 */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=ScaleQuantumToChar(GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): exports the alpha value unchanged (no opacity
               inversion) — confirm intended. */
            *q=ScaleQuantumToChar(GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is meaningful only for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=ScaleQuantumToChar(GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(image,p)));
            break;
          }
          default:
            break;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportDoublePixel() copies the pixels of the region roi into the caller-
  supplied buffer as doubles normalized to [0..1] (QuantumScale), in the
  channel order named by map.  Common orderings take unrolled fast paths; any
  other map is handled generically via quantum_map.  Returns MagickTrue if
  every requested row was read, MagickFalse otherwise.
*/
static MagickBooleanType ExportDoublePixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;

  double
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  q=(double *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=0.0;  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelIntensity(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=(double) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(double) (QuantumScale*GetPixelRed(image,p));
          *q++=(double) (QuantumScale*GetPixelGreen(image,p));
          *q++=(double) (QuantumScale*GetPixelBlue(image,p));
          *q++=0.0;  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: decode each pixel one quantum_map entry at a time.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* default (e.g. pad, unknown quantum) exports 0 */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=(double) (QuantumScale*GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=(double) (QuantumScale*GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=(double) (QuantumScale*GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=(double) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): exports the alpha value unchanged (no opacity
               inversion) — confirm intended. */
            *q=(double) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is meaningful only for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=(double) (QuantumScale*GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=(double) (QuantumScale*GetPixelIntensity(image,p));
            break;
          }
          default:
            *q=0;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportFloatPixel() copies the pixels of the region roi into the caller-
  supplied buffer as floats normalized to [0..1] (QuantumScale), in the
  channel order named by map.  Common orderings take unrolled fast paths; any
  other map is handled generically via quantum_map.  Returns MagickTrue if
  every requested row was read, MagickFalse otherwise.
*/
static MagickBooleanType ExportFloatPixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;

  float
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  q=(float *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=0.0;  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelIntensity(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=(float) (QuantumScale*GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=(float) (QuantumScale*GetPixelRed(image,p));
          *q++=(float) (QuantumScale*GetPixelGreen(image,p));
          *q++=(float) (QuantumScale*GetPixelBlue(image,p));
          *q++=0.0;  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: decode each pixel one quantum_map entry at a time.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* default (e.g. pad, unknown quantum) exports 0 */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=(float) (QuantumScale*GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=(float) (QuantumScale*GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=(float) (QuantumScale*GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=(float) (QuantumScale*((Quantum) (GetPixelAlpha(image,p))));
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): exports the alpha value unchanged (no opacity
               inversion) — confirm intended. */
            *q=(float) (QuantumScale*GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is meaningful only for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=(float) (QuantumScale*GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=(float) (QuantumScale*GetPixelIntensity(image,p));
            break;
          }
          default:
            *q=0;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportLongPixel() copies the pixels of the region roi into the caller-
  supplied buffer as unsigned int values scaled to the full 32-bit range, in
  the channel order named by map.  Common orderings take unrolled fast paths;
  any other map is handled generically via quantum_map.  Returns MagickTrue
  if every requested row was read, MagickFalse otherwise.
*/
static MagickBooleanType ExportLongPixel(const Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;

  ssize_t
    x;

  unsigned int
    *magick_restrict q;

  size_t
    length;

  ssize_t
    y;

  q=(unsigned int *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=0;  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(ClampToQuantum(GetPixelIntensity(image,p)));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=ScaleQuantumToLong(GetPixelAlpha(image,p));
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (p == (const Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          *q++=ScaleQuantumToLong(GetPixelRed(image,p));
          *q++=ScaleQuantumToLong(GetPixelGreen(image,p));
          *q++=ScaleQuantumToLong(GetPixelBlue(image,p));
          *q++=0;  /* pad channel */
          p+=GetPixelChannels(image);
        }
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: decode each pixel one quantum_map entry at a time.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        *q=0;  /* default (e.g. pad, unknown quantum) exports 0 */
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelRed(image,p));
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelGreen(image,p));
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelBlue(image,p));
            break;
          }
          case AlphaQuantum:
          {
            *q=ScaleQuantumToLong(GetPixelAlpha(image,p));
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): exports the alpha value unchanged (no opacity
               inversion) — confirm intended. */
            *q=ScaleQuantumToLong(GetPixelAlpha(image,p));
            break;
          }
          case BlackQuantum:
          {
            /* Black channel is meaningful only for CMYK images. */
            if (image->colorspace == CMYKColorspace)
              *q=ScaleQuantumToLong(GetPixelBlack(image,p));
            break;
          }
          case IndexQuantum:
          {
            *q=ScaleQuantumToLong(ClampToQuantum(GetPixelIntensity(image,p)));
            break;
          }
          default:
            break;
        }
        q++;
      }
      p+=GetPixelChannels(image);
    }
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportLongLongPixel() copies the pixels of the region `roi' into the
  caller-supplied buffer `pixels', emitting one 64-bit unsigned value
  (MagickSizeType) per channel in the channel order named by `map'.
  Common orders ("BGR", "BGRA", "BGRP", "I", "RGB", "RGBA", "RGBP") use
  dedicated fast paths; any other map falls through to the generic loop
  driven by `quantum_map'.  Returns MagickTrue on success, MagickFalse if
  any scanline could not be read (details are in `exception').
*/
static MagickBooleanType ExportLongLongPixel(const Image *image,
const RectangleInfo *roi,const char *magick_restrict map,
const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
const Quantum
*magick_restrict p;
ssize_t
x;
MagickSizeType
*magick_restrict q;
size_t
length;
ssize_t
y;
/* q walks the output buffer; one 64-bit slot per exported channel. */
q=(MagickSizeType *) pixels;
/* Fast paths for the common channel orders. */
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
/* 'P' is a pad channel: always written as zero. */
*q++=0;
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
/* Grayscale export: intensity is computed, clamped, then scaled. */
*q++=ScaleQuantumToLongLong(ClampToQuantum(
GetPixelIntensity(image,p)));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
*q++=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToLongLong(GetPixelRed(image,p));
*q++=ScaleQuantumToLongLong(GetPixelGreen(image,p));
*q++=ScaleQuantumToLongLong(GetPixelBlue(image,p));
/* 'P' is a pad channel: always written as zero. */
*q++=0;
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: emit channels in the order dictated by quantum_map,
  one output slot per letter of `map'.
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
/* Default to zero so unhandled quanta (e.g. pad) export as 0. */
*q=0;
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelRed(image,p));
break;
}
case GreenQuantum:
case MagentaQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelGreen(image,p));
break;
}
case BlueQuantum:
case YellowQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelBlue(image,p));
break;
}
case AlphaQuantum:
{
*q=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
break;
}
case OpacityQuantum:
{
/* Opacity is exported identically to alpha here. */
*q=ScaleQuantumToLongLong(GetPixelAlpha(image,p));
break;
}
case BlackQuantum:
{
/* Black is only meaningful for CMYK; otherwise the slot stays 0. */
if (image->colorspace == CMYKColorspace)
*q=ScaleQuantumToLongLong(GetPixelBlack(image,p));
break;
}
case IndexQuantum:
{
*q=ScaleQuantumToLongLong(ClampToQuantum(
GetPixelIntensity(image,p)));
break;
}
default:
break;
}
q++;
}
p+=GetPixelChannels(image);
}
}
/* y reaches roi->height only if every scanline was read successfully. */
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportQuantumPixel() copies the pixels of the region `roi' into the
  caller-supplied buffer `pixels', emitting one Quantum per channel in the
  channel order named by `map'.  Unlike the other Export*Pixel() variants,
  no scaling is performed: channel values are copied at native Quantum
  precision.  Common orders ("BGR", "BGRA", "BGRP", "I", "RGB", "RGBA",
  "RGBP") use dedicated fast paths; any other map falls through to the
  generic loop driven by `quantum_map'.  Returns MagickTrue on success,
  MagickFalse if any scanline could not be read (details in `exception').
*/
static MagickBooleanType ExportQuantumPixel(const Image *image,
const RectangleInfo *roi,const char *magick_restrict map,
const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
size_t
length;
ssize_t
y;
/* q walks the output buffer; one Quantum slot per exported channel. */
q=(Quantum *) pixels;
/* Fast paths for the common channel orders. */
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelBlue(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelRed(image,p);
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelBlue(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelRed(image,p);
*q++=(Quantum) (GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelBlue(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelRed(image,p);
/* 'P' is a pad channel: always written as zero. */
*q++=(Quantum) 0;
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
/* Grayscale export: intensity is computed then clamped to Quantum. */
*q++=ClampToQuantum(GetPixelIntensity(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelRed(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelBlue(image,p);
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelRed(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelBlue(image,p);
*q++=(Quantum) (GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=GetPixelRed(image,p);
*q++=GetPixelGreen(image,p);
*q++=GetPixelBlue(image,p);
/* 'P' is a pad channel: always written as zero. */
*q++=(Quantum) 0;
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: emit channels in the order dictated by quantum_map,
  one output slot per letter of `map'.
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
/* Default to zero so unhandled quanta (e.g. pad) export as 0. */
*q=(Quantum) 0;
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
*q=GetPixelRed(image,p);
break;
}
case GreenQuantum:
case MagentaQuantum:
{
*q=GetPixelGreen(image,p);
break;
}
case BlueQuantum:
case YellowQuantum:
{
*q=GetPixelBlue(image,p);
break;
}
case AlphaQuantum:
{
*q=GetPixelAlpha(image,p);
break;
}
case OpacityQuantum:
{
/* Opacity is exported identically to alpha here. */
*q=GetPixelAlpha(image,p);
break;
}
case BlackQuantum:
{
/* Black is only meaningful for CMYK; otherwise the slot stays 0. */
if (image->colorspace == CMYKColorspace)
*q=GetPixelBlack(image,p);
break;
}
case IndexQuantum:
{
*q=ClampToQuantum(GetPixelIntensity(image,p));
break;
}
default:
{
/* Redundant with the pre-switch zero, but kept for clarity. */
*q=(Quantum) 0;
break;
}
}
q++;
}
p+=GetPixelChannels(image);
}
}
/* y reaches roi->height only if every scanline was read successfully. */
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportShortPixel() copies the pixels of the region `roi' into the
  caller-supplied buffer `pixels', emitting one unsigned short per channel
  in the channel order named by `map', with each Quantum scaled to the
  16-bit range.  Common orders ("BGR", "BGRA", "BGRP", "I", "RGB", "RGBA",
  "RGBP") use dedicated fast paths; any other map falls through to the
  generic loop driven by `quantum_map'.  Returns MagickTrue on success,
  MagickFalse if any scanline could not be read (details in `exception').
*/
static MagickBooleanType ExportShortPixel(const Image *image,
const RectangleInfo *roi,const char *magick_restrict map,
const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception)
{
const Quantum
*magick_restrict p;
ssize_t
x;
unsigned short
*magick_restrict q;
size_t
length;
ssize_t
y;
/* q walks the output buffer; one 16-bit slot per exported channel. */
q=(unsigned short *) pixels;
/* Fast paths for the common channel orders. */
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
/* 'P' is a pad channel: always written as zero. */
*q++=0;
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
/* Grayscale export: intensity is computed, clamped, then scaled. */
*q++=ScaleQuantumToShort(ClampToQuantum(GetPixelIntensity(image,p)));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
*q++=ScaleQuantumToShort(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
*q++=ScaleQuantumToShort(GetPixelRed(image,p));
*q++=ScaleQuantumToShort(GetPixelGreen(image,p));
*q++=ScaleQuantumToShort(GetPixelBlue(image,p));
/* 'P' is a pad channel: always written as zero. */
*q++=0;
p+=GetPixelChannels(image);
}
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: emit channels in the order dictated by quantum_map,
  one output slot per letter of `map'.
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
/* Default to zero so unhandled quanta (e.g. pad) export as 0. */
*q=0;
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
*q=ScaleQuantumToShort(GetPixelRed(image,p));
break;
}
case GreenQuantum:
case MagentaQuantum:
{
*q=ScaleQuantumToShort(GetPixelGreen(image,p));
break;
}
case BlueQuantum:
case YellowQuantum:
{
*q=ScaleQuantumToShort(GetPixelBlue(image,p));
break;
}
case AlphaQuantum:
{
*q=ScaleQuantumToShort(GetPixelAlpha(image,p));
break;
}
case OpacityQuantum:
{
/* Opacity is exported identically to alpha here. */
*q=ScaleQuantumToShort(GetPixelAlpha(image,p));
break;
}
case BlackQuantum:
{
/* Black is only meaningful for CMYK; otherwise the slot stays 0. */
if (image->colorspace == CMYKColorspace)
*q=ScaleQuantumToShort(GetPixelBlack(image,p));
break;
}
case IndexQuantum:
{
*q=ScaleQuantumToShort(ClampToQuantum(GetPixelIntensity(image,p)));
break;
}
default:
break;
}
q++;
}
p+=GetPixelChannels(image);
}
}
/* y reaches roi->height only if every scanline was read successfully. */
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ExportImagePixels() extracts pixel data from the region (x,y,width,height)
  of `image' into the caller-preallocated buffer `pixels', in the channel
  order given by `map' (e.g. "RGB", "BGRA", "I") and the storage type given
  by `type'.  It first translates each letter of `map' into a QuantumType,
  rejecting CMYK-only letters (C, M, Y, K) for non-CMYK images and unknown
  letters outright, then dispatches to the type-specific Export*Pixel()
  worker.  Returns MagickTrue on success; on failure returns MagickFalse
  with details in `exception'.
*/
MagickExport MagickBooleanType ExportImagePixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t width,const size_t height,
const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
MagickBooleanType
status;
QuantumType
*quantum_map;
RectangleInfo
roi;
ssize_t
i;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* One QuantumType slot per letter of the map string. */
length=strlen(map);
quantum_map=(QuantumType *) AcquireQuantumMemory(length,sizeof(*quantum_map));
if (quantum_map == (QuantumType *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
/*
  Translate the map string into quantum types; CMYK-only letters are
  rejected unless the image really is CMYK (quantum_map is released on
  every error path).
*/
for (i=0; i < (ssize_t) length; i++)
{
switch (map[i])
{
case 'A':
case 'a':
{
quantum_map[i]=AlphaQuantum;
break;
}
case 'B':
case 'b':
{
quantum_map[i]=BlueQuantum;
break;
}
case 'C':
case 'c':
{
quantum_map[i]=CyanQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
case 'g':
case 'G':
{
quantum_map[i]=GreenQuantum;
break;
}
case 'I':
case 'i':
{
quantum_map[i]=IndexQuantum;
break;
}
case 'K':
case 'k':
{
quantum_map[i]=BlackQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
case 'M':
case 'm':
{
quantum_map[i]=MagentaQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
case 'o':
case 'O':
{
quantum_map[i]=OpacityQuantum;
break;
}
case 'P':
case 'p':
{
/* Pad channel: exported as zero by the workers. */
quantum_map[i]=UndefinedQuantum;
break;
}
case 'R':
case 'r':
{
quantum_map[i]=RedQuantum;
break;
}
case 'Y':
case 'y':
{
quantum_map[i]=YellowQuantum;
if (image->colorspace == CMYKColorspace)
break;
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",map);
return(MagickFalse);
}
default:
{
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedPixelMap","`%s'",map);
return(MagickFalse);
}
}
}
/* Region of interest to export. */
roi.width=width;
roi.height=height;
roi.x=x;
roi.y=y;
/* Dispatch to the worker matching the requested storage type. */
switch (type)
{
case CharPixel:
{
status=ExportCharPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case DoublePixel:
{
status=ExportDoublePixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case FloatPixel:
{
status=ExportFloatPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case LongPixel:
{
status=ExportLongPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case LongLongPixel:
{
status=ExportLongLongPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case QuantumPixel:
{
status=ExportQuantumPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
case ShortPixel:
{
status=ExportShortPixel(image,&roi,map,quantum_map,pixels,exception);
break;
}
default:
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedPixelMap","`%s'",map);
status=MagickFalse;
}
}
quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelInfo() initializes the PixelInfo structure.
%
% The format of the GetPixelInfo method is:
%
% GetPixelInfo(const Image *image,PixelInfo *pixel)
%
% A description of each parameter follows:
%
% o image: the image. (optional - may be NULL)
%
% o pixel: Specifies a pointer to a PixelInfo structure.
%
*/
MagickExport void GetPixelInfo(const Image *image,PixelInfo *pixel)
{
  /*
    Wipe the structure, then install library-wide defaults: DirectClass
    storage, sRGB colorspace, the build-time quantum depth, no alpha
    trait, and a fully opaque alpha value.
  */
  (void) memset(pixel,0,sizeof(*pixel));
  pixel->storage_class=DirectClass;
  pixel->colorspace=sRGBColorspace;
  pixel->depth=MAGICKCORE_QUANTUM_DEPTH;
  pixel->alpha_trait=UndefinedPixelTrait;
  pixel->alpha=(double) OpaqueAlpha;
  if (image != (const Image *) NULL)
    {
      /*
        An image was supplied: inherit its traits in place of the defaults.
      */
      pixel->storage_class=image->storage_class;
      pixel->colorspace=image->colorspace;
      pixel->alpha_trait=image->alpha_trait;
      pixel->depth=image->depth;
      pixel->fuzz=image->fuzz;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l I n f o I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelInfoIntensity() returns a single sample intensity value from the red,
% green, and blue components of a pixel based on the selected method:
%
% Rec601Luma 0.298839R' + 0.586811G' + 0.114350B'
% Rec601Luminance 0.298839R + 0.586811G + 0.114350B
% Rec709Luma 0.212656R' + 0.715158G' + 0.072186B'
% Rec709Luminance 0.212656R + 0.715158G + 0.072186B
% Brightness max(R', G', B')
% Lightness (min(R', G', B') + max(R', G', B')) / 2.0
%
% MS (R^2 + G^2 + B^2) / 3.0
% RMS sqrt((R^2 + G^2 + B^2) / 3.0)
% Average (R + G + B) / 3.0
%
% The format of the GetPixelInfoIntensity method is:
%
% MagickRealType GetPixelInfoIntensity(const Image *image,
% const PixelInfo *pixel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pixel: Specifies a pointer to a PixelInfo structure.
%
*/
/*
  GetPixelInfoIntensity() reduces the red, green, and blue components of a
  PixelInfo to a single intensity value using the image's configured
  PixelIntensityMethod (Rec709Luma when `image' is NULL or for any
  unrecognized method).  The Luma methods expect gamma-encoded input, so
  linear RGB values are encoded first; the Luminance methods expect linear
  input, so sRGB values are decoded first.  Returns the intensity as a
  MagickRealType.
*/
MagickExport MagickRealType GetPixelInfoIntensity(
const Image *magick_restrict image,const PixelInfo *magick_restrict pixel)
{
MagickRealType
blue,
green,
red,
intensity;
PixelIntensityMethod
method;
/* Fall back to Rec709 luma when no image is supplied. */
method=Rec709LumaPixelIntensityMethod;
if (image != (const Image *) NULL)
method=image->intensity;
red=pixel->red;
green=pixel->green;
blue=pixel->blue;
switch (method)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
/* Mean square, normalized by QuantumRange to stay in quantum scale. */
intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/
(3.0*QuantumRange));
break;
}
case Rec601LumaPixelIntensityMethod:
{
/* Luma operates on gamma-encoded values; encode linear RGB first. */
if (pixel->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
/* Luminance operates on linear values; decode sRGB first. */
if (pixel->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
/* Rec709 luma doubles as the method for any unrecognized setting. */
if (pixel->colorspace == RGBColorspace)
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if (pixel->colorspace == sRGBColorspace)
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
/* Root mean square of the three components. */
intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/
sqrt(3.0));
break;
}
}
return(intensity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelIntensity() returns a single sample intensity value from the red,
% green, and blue components of a pixel based on the selected method:
%
% Rec601Luma 0.298839R' + 0.586811G' + 0.114350B'
% Rec601Luminance 0.298839R + 0.586811G + 0.114350B
% Rec709Luma 0.212656R' + 0.715158G' + 0.072186B'
% Rec709Luminance 0.212656R + 0.715158G + 0.072186B
% Brightness max(R', G', B')
% Lightness (min(R', G', B') + max(R', G', B')) / 2.0
%
% MS (R^2 + G^2 + B^2) / 3.0
% RMS sqrt((R^2 + G^2 + B^2) / 3.0)
% Average (R + G + B) / 3.0
%
% The format of the GetPixelIntensity method is:
%
% MagickRealType GetPixelIntensity(const Image *image,
% const Quantum *pixel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pixel: Specifies a pointer to a Quantum structure.
%
*/
/*
  GetPixelIntensity() reduces the red, green, and blue components of a raw
  pixel to a single intensity value using the image's configured
  PixelIntensityMethod (Rec709Luma for any unrecognized method).  For a
  single-channel image the first channel is returned unchanged.  The Luma
  methods expect gamma-encoded input, so linear colorspaces are encoded
  first; the Luminance methods expect linear input, so non-linear
  colorspaces are decoded first.  Returns the intensity as a
  MagickRealType.
*/
MagickExport MagickRealType GetPixelIntensity(
const Image *magick_restrict image,const Quantum *magick_restrict pixel)
{
MagickRealType
blue,
green,
red,
intensity;
red=(MagickRealType) GetPixelRed(image,pixel);
/* Single-channel (gray) images: the lone channel IS the intensity. */
if (image->number_channels == 1)
return(red);
green=(MagickRealType) GetPixelGreen(image,pixel);
blue=(MagickRealType) GetPixelBlue(image,pixel);
switch (image->intensity)
{
case AveragePixelIntensityMethod:
{
intensity=(red+green+blue)/3.0;
break;
}
case BrightnessPixelIntensityMethod:
{
intensity=MagickMax(MagickMax(red,green),blue);
break;
}
case LightnessPixelIntensityMethod:
{
intensity=(MagickMin(MagickMin(red,green),blue)+
MagickMax(MagickMax(red,green),blue))/2.0;
break;
}
case MSPixelIntensityMethod:
{
/* Mean square, normalized by QuantumRange to stay in quantum scale. */
intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/
(3.0*QuantumRange));
break;
}
case Rec601LumaPixelIntensityMethod:
{
/* Luma operates on gamma-encoded values; encode linear input first. */
if ((image->colorspace == RGBColorspace) ||
(image->colorspace == LinearGRAYColorspace))
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec601LuminancePixelIntensityMethod:
{
/* Luminance operates on linear values; decode non-linear input first. */
if ((image->colorspace == sRGBColorspace) ||
(image->colorspace == GRAYColorspace))
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.298839*red+0.586811*green+0.114350*blue;
break;
}
case Rec709LumaPixelIntensityMethod:
default:
{
/* Rec709 luma doubles as the method for any unrecognized setting. */
if ((image->colorspace == RGBColorspace) ||
(image->colorspace == LinearGRAYColorspace))
{
red=EncodePixelGamma(red);
green=EncodePixelGamma(green);
blue=EncodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case Rec709LuminancePixelIntensityMethod:
{
if ((image->colorspace == sRGBColorspace) ||
(image->colorspace == GRAYColorspace))
{
red=DecodePixelGamma(red);
green=DecodePixelGamma(green);
blue=DecodePixelGamma(blue);
}
intensity=0.212656*red+0.715158*green+0.072186*blue;
break;
}
case RMSPixelIntensityMethod:
{
/* Root mean square of the three components. */
intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/
sqrt(3.0));
break;
}
}
return(intensity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p o r t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImportImagePixels() accepts pixel data and stores in the image at the
% location you specify. The method returns MagickTrue on success otherwise
% MagickFalse if an error is encountered. The pixel data can be either char,
% Quantum, short int, unsigned int, unsigned long long, float, or double in
% the order specified by map.
%
% Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% ImportImagePixels(image,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the ImportImagePixels method is:
%
% MagickBooleanType ImportImagePixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% const char *map,const StorageType type,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,width,height: These values define the perimeter
% of a region of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o type: Define the data type of the pixels. Float and double types are
% normalized to [0..1] otherwise [0..QuantumRange]. Choose from these
% types: CharPixel (char *), DoublePixel (double *), FloatPixel (float *),
% LongPixel (unsigned int *), LongLongPixel (unsigned long long *),
% QuantumPixel (Quantum *), or ShortPixel (unsigned short *).
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ImportCharPixel() stores unsigned-char pixel data from `pixels' into the
  region `roi' of `image', reading one byte per channel in the channel
  order named by `map' and scaling each byte to the native Quantum range.
  Common orders ("BGR", "BGRA", "BGRO", "BGRP", "I", "RGB", "RGBA",
  "RGBO", "RGBP") use dedicated fast paths; any other map falls through to
  the generic loop driven by `quantum_map'.  Note the 'O' (opacity) paths
  store the byte via SetPixelAlpha, i.e. identically to 'A'.  Returns
  MagickTrue on success, MagickFalse if any scanline could not be fetched
  or synced (details in `exception').
*/
static MagickBooleanType ImportCharPixel(Image *image,const RectangleInfo *roi,
const char *magick_restrict map,const QuantumType *quantum_map,
const void *pixels,ExceptionInfo *exception)
{
const unsigned char
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
size_t
length;
ssize_t
y;
/* p walks the input buffer; one byte per imported channel. */
p=(const unsigned char *) pixels;
/* Fast paths for the common channel orders. */
if (LocaleCompare(map,"BGR") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRO") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
/* 'O' (opacity) is stored via the alpha channel, same as 'A'. */
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"BGRP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
/* 'P' is a pad channel: the input byte is skipped. */
p++;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"I") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelGray(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGB") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBA") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBO") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
/* 'O' (opacity) is stored via the alpha channel, same as 'A'. */
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
if (LocaleCompare(map,"RGBP") == 0)
{
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
/* 'P' is a pad channel: the input byte is skipped. */
p++;
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  Generic path: store channels in the order dictated by quantum_map,
  consuming one input byte per letter of `map'.
*/
length=strlen(map);
for (y=0; y < (ssize_t) roi->height; y++)
{
q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) roi->width; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) length; i++)
{
switch (quantum_map[i])
{
case RedQuantum:
case CyanQuantum:
{
SetPixelRed(image,ScaleCharToQuantum(*p),q);
break;
}
case GreenQuantum:
case MagentaQuantum:
{
SetPixelGreen(image,ScaleCharToQuantum(*p),q);
break;
}
case BlueQuantum:
case YellowQuantum:
{
SetPixelBlue(image,ScaleCharToQuantum(*p),q);
break;
}
case AlphaQuantum:
{
SetPixelAlpha(image,ScaleCharToQuantum(*p),q);
break;
}
case OpacityQuantum:
{
/* Opacity is imported identically to alpha here. */
SetPixelAlpha(image,ScaleCharToQuantum(*p),q);
break;
}
case BlackQuantum:
{
SetPixelBlack(image,ScaleCharToQuantum(*p),q);
break;
}
case IndexQuantum:
{
SetPixelGray(image,ScaleCharToQuantum(*p),q);
break;
}
default:
/* Unknown quanta (e.g. pad): the input byte is skipped below. */
break;
}
p++;
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
/* y reaches roi->height only if every scanline was fetched and synced. */
return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportDoublePixel() copies double-precision pixel components from the user
  buffer `pixels' into the authentic pixel cache of `image' over the region
  `roi'.  Each source component is multiplied by QuantumRange and clamped, so
  callers are expected to supply values normalized to [0.0,1.0].  `map' names
  the per-pixel channel order (e.g. "RGBA"); the most common orders take a
  dedicated fast path, anything else falls through to a generic loop driven by
  `quantum_map' (one QuantumType per character of `map').  Returns MagickTrue
  when every row was imported; MagickFalse if pixel authorization or a row
  sync failed (the early `break' leaves y < roi->height).
*/
static MagickBooleanType ImportDoublePixel(Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception)
{
  const double
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const double *) pixels;
  /*
    Dedicated row loops for the common channel orders.
  */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;  /* the pad channel ('P') is not consumed for RGBP here; see
                   the generic path, which skips one component per map char */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: consume exactly one source component per character of the
    map; unrecognized quantum types (e.g. the 'P' pad) are skipped by the
    default case but still advance p.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): opacity is stored as alpha, identically to
               AlphaQuantum — no inversion is applied here. */
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportFloatPixel() copies single-precision pixel components from the user
  buffer `pixels' into the authentic pixel cache of `image' over the region
  `roi'.  Each source component is multiplied by QuantumRange and clamped, so
  callers are expected to supply values normalized to [0.0,1.0].  `map' names
  the per-pixel channel order (e.g. "RGBA"); common orders take a dedicated
  fast path, anything else falls through to the generic loop driven by
  `quantum_map' (one QuantumType per character of `map').  Returns MagickTrue
  when every row was imported; MagickFalse if pixel authorization or a row
  sync failed.
*/
static MagickBooleanType ImportFloatPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  const float
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const float *) pixels;
  /*
    Dedicated row loops for the common channel orders.
  */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;  /* the pad channel ('P') is not consumed for RGBP here; see
                   the generic path, which skips one component per map char */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: consume exactly one source component per character of the
    map; unrecognized quantum types (e.g. the 'P' pad) are skipped by the
    default case but still advance p.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): opacity is stored as alpha, identically to
               AlphaQuantum — no inversion is applied here. */
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportLongPixel() copies 32-bit unsigned pixel components from the user
  buffer `pixels' into the authentic pixel cache of `image' over the region
  `roi'.  Each component spans the full 32-bit range and is scaled to the
  configured Quantum depth with ScaleLongToQuantum().  `map' names the
  per-pixel channel order (e.g. "RGBA"); common orders take a dedicated fast
  path, anything else falls through to the generic loop driven by
  `quantum_map' (one QuantumType per character of `map').  Returns MagickTrue
  when every row was imported; MagickFalse if pixel authorization or a row
  sync failed.
*/
static MagickBooleanType ImportLongPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  const unsigned int
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const unsigned int *) pixels;
  /*
    Dedicated row loops for the common channel orders.
  */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: consume exactly one source component per character of the
    map; unrecognized quantum types (e.g. the 'P' pad) are skipped by the
    default case but still advance p.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): opacity is stored as alpha, identically to
               AlphaQuantum — no inversion is applied here. */
            SetPixelAlpha(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleLongToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleLongToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportLongLongPixel() copies 64-bit unsigned pixel components
  (MagickSizeType) from the user buffer `pixels' into the authentic pixel
  cache of `image' over the region `roi'.  Each component spans the full
  64-bit range and is scaled to the configured Quantum depth with
  ScaleLongLongToQuantum().  `map' names the per-pixel channel order (e.g.
  "RGBA"); common orders take a dedicated fast path, anything else falls
  through to the generic loop driven by `quantum_map' (one QuantumType per
  character of `map').  Returns MagickTrue when every row was imported;
  MagickFalse if pixel authorization or a row sync failed.
*/
static MagickBooleanType ImportLongLongPixel(Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception)
{
  const MagickSizeType
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const MagickSizeType *) pixels;
  /*
    Dedicated row loops for the common channel orders.
  */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleLongLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q);
          SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: consume exactly one source component per character of the
    map; unrecognized quantum types (e.g. the 'P' pad) are skipped by the
    default case but still advance p.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): opacity is stored as alpha, identically to
               AlphaQuantum — no inversion is applied here. */
            SetPixelAlpha(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleLongLongToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportQuantumPixel() copies native Quantum-typed pixel components from the
  user buffer `pixels' into the authentic pixel cache of `image' over the
  region `roi'.  Because the source is already in Quantum units, components
  are stored directly with no scaling or clamping.  `map' names the per-pixel
  channel order (e.g. "RGBA"); common orders take a dedicated fast path,
  anything else falls through to the generic loop driven by `quantum_map'
  (one QuantumType per character of `map').  Returns MagickTrue when every
  row was imported; MagickFalse if pixel authorization or a row sync failed.
*/
static MagickBooleanType ImportQuantumPixel(Image *image,
  const RectangleInfo *roi,const char *magick_restrict map,
  const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception)
{
  const Quantum
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const Quantum *) pixels;
  /*
    Dedicated row loops for the common channel orders.
  */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelRed(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelRed(image,*p++,q);
          SetPixelAlpha(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelRed(image,*p++,q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelBlue(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelBlue(image,*p++,q);
          SetPixelAlpha(image,*p++,q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,*p++,q);
          SetPixelGreen(image,*p++,q);
          SetPixelBlue(image,*p++,q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: consume exactly one source component per character of the
    map; unrecognized quantum types (e.g. the 'P' pad) are skipped by the
    default case but still advance p.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,*p,q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,*p,q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,*p,q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,*p,q);
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): opacity is stored as alpha, identically to
               AlphaQuantum — no inversion is applied here. */
            SetPixelAlpha(image,*p,q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,*p,q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,*p,q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
/*
  ImportShortPixel() copies 16-bit unsigned pixel components from the user
  buffer `pixels' into the authentic pixel cache of `image' over the region
  `roi'.  Each component spans the full 16-bit range and is scaled to the
  configured Quantum depth with ScaleShortToQuantum().  `map' names the
  per-pixel channel order (e.g. "RGBA"); common orders take a dedicated fast
  path, anything else falls through to the generic loop driven by
  `quantum_map' (one QuantumType per character of `map').  Returns MagickTrue
  when every row was imported; MagickFalse if pixel authorization or a row
  sync failed.
*/
static MagickBooleanType ImportShortPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  const unsigned short
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const unsigned short *) pixels;
  /*
    Dedicated row loops for the common channel orders.
  */
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          SetPixelAlpha(image,ScaleShortToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          p++;  /* skip the pad channel */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: consume exactly one source component per character of the
    map; unrecognized quantum types (e.g. the 'P' pad) are skipped by the
    default case but still advance p.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            /* NOTE(review): opacity is stored as alpha, identically to
               AlphaQuantum — no inversion is applied here. */
            SetPixelAlpha(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleShortToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}
MagickExport MagickBooleanType ImportImagePixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,const char *map,
  const StorageType type,const void *pixels,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  QuantumType
    *quantum_map;

  RectangleInfo
    roi;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.  Each letter of the component map (e.g. "RGB",
    "BGRA") selects the channel the corresponding interleaved sample is
    stored into; build the quantum map first.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=strlen(map);
  quantum_map=(QuantumType *) AcquireQuantumMemory(length,sizeof(*quantum_map));
  if (quantum_map == (QuantumType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      {
        quantum_map[i]=AlphaQuantum;
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'B':
      case 'b':
      {
        quantum_map[i]=BlueQuantum;
        break;
      }
      case 'C':
      case 'c':
      {
        quantum_map[i]=CyanQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'g':
      case 'G':
      {
        quantum_map[i]=GreenQuantum;
        break;
      }
      case 'K':
      case 'k':
      {
        quantum_map[i]=BlackQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'I':
      case 'i':
      {
        quantum_map[i]=IndexQuantum;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
        break;
      }
      case 'm':
      case 'M':
      {
        quantum_map[i]=MagentaQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'O':
      case 'o':
      {
        quantum_map[i]=OpacityQuantum;
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'P':
      case 'p':
      {
        /*
          Pad component: the sample is consumed but not stored.
        */
        quantum_map[i]=UndefinedQuantum;
        break;
      }
      case 'R':
      case 'r':
      {
        quantum_map[i]=RedQuantum;
        break;
      }
      case 'Y':
      case 'y':
      {
        quantum_map[i]=YellowQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      default:
      {
        /*
          Unrecognized component letter: release the map and fail.
        */
        quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedPixelMap","`%s'",map);
        return(MagickFalse);
      }
    }
  }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      /*
        Release the quantum map on this early return too; previously it
        leaked here.
      */
      quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
      return(MagickFalse);
    }
  /*
    Transfer the pixels from the pixel data to the image.
  */
  roi.width=width;
  roi.height=height;
  roi.x=x;
  roi.y=y;
  switch (type)
  {
    case CharPixel:
    {
      status=ImportCharPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case DoublePixel:
    {
      status=ImportDoublePixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case FloatPixel:
    {
      status=ImportFloatPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case LongPixel:
    {
      status=ImportLongPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case LongLongPixel:
    {
      status=ImportLongLongPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case QuantumPixel:
    {
      status=ImportQuantumPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case ShortPixel:
    {
      status=ImportShortPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    default:
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnrecognizedStorageType","`%d'",type);
      status=MagickFalse;
    }
  }
  quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e P i x e l C h a n n e l M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializePixelChannelMap() defines the standard pixel component map.
%
% The format of the InitializePixelChannelMap() method is:
%
% void InitializePixelChannelMap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void InitializePixelChannelMap(Image *image)
{
  PixelTrait
    trait;
  ssize_t
    i;
  ssize_t
    n;
  /*
    Rebuild the image's channel map: assign each active pixel channel its
    traits and its offset (n) within a pixel, in canonical storage order:
    color, black, meta, alpha, index, then mask channels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) memset(image->channel_map,0,MaxPixelChannels*
    sizeof(*image->channel_map));
  trait=UpdatePixelTrait;
  if (image->alpha_trait != UndefinedPixelTrait)
    trait=(PixelTrait) (trait | BlendPixelTrait);
  n=0;
  if ((image->colorspace == LinearGRAYColorspace) ||
      (image->colorspace == GRAYColorspace))
    {
      /*
        Grayscale: red, green and blue all alias offset 0; n advances once.
      */
      SetPixelChannelAttributes(image,BluePixelChannel,trait,n);
      SetPixelChannelAttributes(image,GreenPixelChannel,trait,n);
      SetPixelChannelAttributes(image,RedPixelChannel,trait,n++);
    }
  else
    {
      SetPixelChannelAttributes(image,RedPixelChannel,trait,n++);
      SetPixelChannelAttributes(image,GreenPixelChannel,trait,n++);
      SetPixelChannelAttributes(image,BluePixelChannel,trait,n++);
    }
  if (image->colorspace == CMYKColorspace)
    SetPixelChannelAttributes(image,BlackPixelChannel,trait,n++);
  /*
    Meta channels take the next offsets; their channel id equals the offset.
  */
  for (i=0; i < (ssize_t) image->number_meta_channels; i++)
  {
    SetPixelChannelAttributes(image,(PixelChannel) n,UpdatePixelTrait,n);
    n++;
  }
  if (image->alpha_trait != UndefinedPixelTrait)
    SetPixelChannelAttributes(image,AlphaPixelChannel,CopyPixelTrait,n++);
  if (image->storage_class == PseudoClass)
    SetPixelChannelAttributes(image,IndexPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & ReadMaskChannel) != 0)
    SetPixelChannelAttributes(image,ReadMaskPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & WriteMaskChannel) != 0)
    SetPixelChannelAttributes(image,WriteMaskPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & CompositeMaskChannel) != 0)
    SetPixelChannelAttributes(image,CompositeMaskPixelChannel,CopyPixelTrait,
      n++);
  /*
    n is now the total channel count; re-apply the active channel mask.
  */
  image->number_channels=(size_t) n;
  (void) SetPixelChannelMask(image,image->channel_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelChannel() applies a pixel interpolation method between a
% floating point coordinate and the pixels surrounding that coordinate. No
% pixel area resampling, or scaling of the result is performed.
%
% Interpolation is restricted to just the specified channel.
%
% The format of the InterpolatePixelChannel method is:
%
% MagickBooleanType InterpolatePixelChannel(
% const Image *magick_restrict image,const CacheView *image_view,
% const PixelChannel channel,const PixelInterpolateMethod method,
% const double x,const double y,double *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image view.
%
% o channel: the pixel channel to interpolate.
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o pixel: return the interpolated pixel here.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void CatromWeights(const double x,double (*weights)[4])
{
  double
    complement,
    inner_delta,
    outer;

  /*
    Four 1D Catmull-Rom weights via Nicolas Robidoux's 10-flop refactoring
    (4* + 5- + 1+), originally derived for the VIPS library.  The sampling
    location lies between the second and third input pixels and x is the
    offset from the second one.  The outer weights are computed first; the
    inner pair follows from them (this step is valid for all Keys cubics).
  */
  complement=(double) 1.0-x;
  outer=(double) (-0.5)*x*complement;
  (*weights)[0]=complement*outer;
  (*weights)[3]=x*outer;
  inner_delta=(*weights)[3]-(*weights)[0];
  (*weights)[1]=complement-(*weights)[0]+inner_delta;
  (*weights)[2]=x-(*weights)[3]-inner_delta;
}
static inline void SplineWeights(const double x,double (*weights)[4])
{
  double
    complement,
    outer_delta;

  /*
    Four 1D cubic B-spline smoothing weights via Nicolas Robidoux's 12-flop
    refactoring (6* + 5- + 1+).  The sampling location lies between the
    second and third input pixels and x is the offset from the second one.
    The cubic outer weights are computed directly; the inner pair is then
    derived from them.
  */
  complement=(double) 1.0-x;
  (*weights)[3]=(double) (1.0/6.0)*x*x*x;
  (*weights)[0]=(double) (1.0/6.0)*complement*complement*complement;
  outer_delta=(*weights)[3]-(*weights)[0];
  (*weights)[1]=complement-(*weights)[0]+outer_delta;
  (*weights)[2]=x-(*weights)[3]-outer_delta;
}
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  /*
    Barycentric blend over a triangle: delta->x and delta->y weight the two
    neighbor samples x and y, and the remaining weight goes to the apex
    sample p.
  */
  const double
    apex_weight=1.0-delta->x-delta->y;

  return(delta->x*x+delta->y*y+apex_weight*p);
}
MagickExport MagickBooleanType InterpolatePixelChannel(
  const Image *magick_restrict image,const CacheView_ *image_view,
  const PixelChannel channel,const PixelInterpolateMethod method,
  const double x,const double y,double *pixel,ExceptionInfo *exception)
{
  double
    alpha[16],
    gamma,
    pixels[16];
  MagickBooleanType
    status;
  PixelInterpolateMethod
    interpolate;
  PixelTrait
    traits;
  const Quantum
    *magick_restrict p;
  ssize_t
    i;
  ssize_t
    x_offset,
    y_offset;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  status=MagickTrue;
  *pixel=0.0;
  traits=GetPixelChannelTraits(image,channel);
  /*
    Integer pixel to the upper left of the sampling point.
  */
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=image->interpolate;
  switch (interpolate)
  {
    case AverageInterpolatePixel: /* nearest 4 neighbours */
    case Average9InterpolatePixel: /* nearest 9 neighbours */
    case Average16InterpolatePixel: /* nearest 16 neighbours */
    {
      ssize_t
        count;
      count=2; /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else
        if (interpolate == Average16InterpolatePixel)
          {
            count=4;
            x_offset--;
            y_offset--;
          }
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count; /* Number of pixels to average */
      /*
        Non-blending channels are read as-is; blending channels are
        premultiplied by the sample's normalized alpha.
      */
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < (ssize_t) count; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < (ssize_t) count; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      for (i=0; i < (ssize_t) count; i++)
      {
        /*
          Un-premultiply each sample and average.
        */
        gamma=PerceptibleReciprocal(alpha[i])/count;
        *pixel+=gamma*pixels[i];
      }
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      PointInfo
        delta,
        epsilon;
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 4; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 4; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      /*
        delta is the fractional position inside the 2x2 cell; epsilon its
        complement.  gamma normalizes by the interpolated alpha weight.
      */
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      epsilon.x=1.0-delta.x;
      epsilon.y=1.0-delta.y;
      gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
        (epsilon.x*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      *pixel=gamma*(epsilon.y*(epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*
        (epsilon.x*pixels[2]+delta.x*pixels[3]));
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 4; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(MagickRealType) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 4; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      /*
        Collapse the 2x2 cell: first fold rows together (or pick one) based
        on the fractional y position, then fold the columns based on x.
      */
      gamma=1.0; /* number of pixels blended together (its variable) */
      for (i=0; i <= 1L; i++) {
        if ((y-y_offset) >= 0.75)
          {
            alpha[i]=alpha[i+2]; /* take right pixels */
            pixels[i]=pixels[i+2];
          }
        else
          if ((y-y_offset) > 0.25)
            {
              gamma=2.0; /* blend both pixels in row */
              alpha[i]+=alpha[i+2]; /* add up alpha weights */
              pixels[i]+=pixels[i+2];
            }
      }
      if ((x-x_offset) >= 0.75)
        {
          alpha[0]=alpha[1]; /* take bottom row blend */
          pixels[0]=pixels[1];
        }
      else
        if ((x-x_offset) > 0.25)
          {
            gamma*=2.0; /* blend both rows */
            alpha[0]+=alpha[1]; /* add up alpha weights */
            pixels[0]+=pixels[1];
          }
      if (channel != AlphaPixelChannel)
        gamma=PerceptibleReciprocal(alpha[0]); /* (color) 1/alpha_weights */
      else
        gamma=PerceptibleReciprocal(gamma); /* (alpha) 1/number_of_pixels */
      *pixel=gamma*pixels[0];
      break;
    }
    case CatromInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /*
        Catmull-Rom needs a 4x4 neighborhood centered one pixel up-left.
      */
      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 16; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 16; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      CatromWeights((double) (x-x_offset),&cx);
      CatromWeights((double) (y-y_offset),&cy);
      /*
        Color channels are un-premultiplied by the interpolated alpha; the
        alpha channel itself is not rescaled.
      */
      gamma=(channel == AlphaPixelChannel ? (double) 1.0 :
        PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
        alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
        alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
        alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
        cx[2]*alpha[14]+cx[3]*alpha[15])));
      *pixel=gamma*(cy[0]*(cx[0]*pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+
        cx[3]*pixels[3])+cy[1]*(cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*
        pixels[6]+cx[3]*pixels[7])+cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+
        cx[2]*pixels[10]+cx[3]*pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*
        pixels[13]+cx[2]*pixels[14]+cx[3]*pixels[15]));
      break;
    }
    case IntegerInterpolatePixel:
    {
      /*
        Truncate to the containing pixel (floor of x,y).
      */
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      *pixel=(double) GetPixelChannel(image,channel,p);
      break;
    }
    case NearestInterpolatePixel:
    {
      /*
        Round to the nearest pixel center.
      */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      *pixel=(double) GetPixelChannel(image,channel,p);
      break;
    }
    case MeshInterpolatePixel:
    {
      PointInfo
        delta,
        luminance;
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 4; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 4; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      /*
        Split the 2x2 cell into two triangles along the diagonal with the
        smaller luminance difference, then interpolate within the triangle
        that contains the sampling point.
      */
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      luminance.x=GetPixelLuma(image,p)-(double)
        GetPixelLuma(image,p+3*GetPixelChannels(image));
      luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double)
        GetPixelLuma(image,p+2*GetPixelChannels(image));
      if (fabs((double) luminance.x) < fabs((double) luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel: 2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[2],pixels[3],
                pixels[0]);
            }
          else
            {
              /*
                Top-right triangle (pixel: 1, diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[1],pixels[0],
                pixels[3]);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel: 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[0],pixels[1],
                pixels[2]);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              *pixel=gamma*MeshInterpolate(&delta,pixels[3],pixels[2],
                pixels[1]);
            }
        }
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /*
        Cubic B-spline smoothing over the same 4x4 neighborhood as Catrom.
      */
      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      if ((traits & BlendPixelTrait) == 0)
        for (i=0; i < 16; i++)
        {
          alpha[i]=1.0;
          pixels[i]=(double) p[i*GetPixelChannels(image)+channel];
        }
      else
        for (i=0; i < 16; i++)
        {
          alpha[i]=QuantumScale*GetPixelAlpha(image,p+i*
            GetPixelChannels(image));
          pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel];
        }
      SplineWeights((double) (x-x_offset),&cx);
      SplineWeights((double) (y-y_offset),&cy);
      gamma=(channel == AlphaPixelChannel ? (double) 1.0 :
        PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
        alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
        alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
        alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
        cx[2]*alpha[14]+cx[3]*alpha[15])));
      *pixel=gamma*(cy[0]*(cx[0]*pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+
        cx[3]*pixels[3])+cy[1]*(cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*
        pixels[6]+cx[3]*pixels[7])+cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+
        cx[2]*pixels[10]+cx[3]*pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*
        pixels[13]+cx[2]*pixels[14]+cx[3]*pixels[15]));
      break;
    }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelChannels() applies a pixel interpolation method between a
% floating point coordinate and the pixels surrounding that coordinate. No
% pixel area resampling, or scaling of the result is performed.
%
% Interpolation is restricted to just the current channel setting of the
% destination image into which the color is to be stored
%
% The format of the InterpolatePixelChannels method is:
%
% MagickBooleanType InterpolatePixelChannels(
% const Image *magick_restrict source,const CacheView *source_view,
% const Image *magick_restrict destination,
% const PixelInterpolateMethod method,const double x,const double y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o source: the source.
%
% o source_view: the source view.
%
% o destination: the destination image, for the interpolated color
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o pixel: return the interpolated pixel here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType InterpolatePixelChannels(
  const Image *magick_restrict source,const CacheView_ *source_view,
  const Image *magick_restrict destination,const PixelInterpolateMethod method,
  const double x,const double y,Quantum *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  double
    alpha[16],
    gamma,
    pixels[16];
  const Quantum
    *magick_restrict p;
  ssize_t
    i;
  ssize_t
    x_offset,
    y_offset;
  PixelInterpolateMethod
    interpolate;
  assert(source != (Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(source_view != (CacheView *) NULL);
  status=MagickTrue;
  /*
    Integer pixel to the upper left of the sampling point.
  */
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=source->interpolate;
  switch (interpolate)
  {
    case AverageInterpolatePixel: /* nearest 4 neighbours */
    case Average9InterpolatePixel: /* nearest 9 neighbours */
    case Average16InterpolatePixel: /* nearest 16 neighbours */
    {
      ssize_t
        count;
      count=2; /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else
        if (interpolate == Average16InterpolatePixel)
          {
            count=4;
            x_offset--;
            y_offset--;
          }
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count; /* Number of pixels to average */
      /*
        Interpolate each source channel that also exists in the destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        double
          sum;
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        for (j=0; j < (ssize_t) count; j++)
          pixels[j]=(double) p[j*GetPixelChannels(source)+i];
        sum=0.0;
        if ((traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blending channel: plain arithmetic mean.
            */
            for (j=0; j < (ssize_t) count; j++)
              sum+=pixels[j];
            sum/=count;
            SetPixelChannel(destination,channel,ClampToQuantum(sum),pixel);
            continue;
          }
        /*
          Blending channel: premultiply by alpha, then un-premultiply each
          sample before averaging.
        */
        for (j=0; j < (ssize_t) count; j++)
        {
          alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
            GetPixelChannels(source));
          pixels[j]*=alpha[j];
          gamma=PerceptibleReciprocal(alpha[j]);
          sum+=gamma*pixels[j];
        }
        sum/=count;
        SetPixelChannel(destination,channel,ClampToQuantum(sum),pixel);
      }
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PointInfo
          delta,
          epsilon;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        /*
          delta is the fractional position inside the 2x2 cell; epsilon its
          complement.
        */
        delta.x=x-x_offset;
        delta.y=y-y_offset;
        epsilon.x=1.0-delta.x;
        epsilon.y=1.0-delta.y;
        pixels[0]=(double) p[i];
        pixels[1]=(double) p[GetPixelChannels(source)+i];
        pixels[2]=(double) p[2*GetPixelChannels(source)+i];
        pixels[3]=(double) p[3*GetPixelChannels(source)+i];
        if ((traits & BlendPixelTrait) == 0)
          {
            gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+delta.x)));
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(destination,channel,ClampToQuantum(gamma*(epsilon.y*
              (epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*(epsilon.x*
              pixels[2]+delta.x*pixels[3]))),pixel);
            continue;
          }
        /*
          Alpha-weighted bilinear blend; gamma un-premultiplies the result.
        */
        alpha[0]=QuantumScale*GetPixelAlpha(source,p);
        alpha[1]=QuantumScale*GetPixelAlpha(source,p+GetPixelChannels(source));
        alpha[2]=QuantumScale*GetPixelAlpha(source,p+2*
          GetPixelChannels(source));
        alpha[3]=QuantumScale*GetPixelAlpha(source,p+3*
          GetPixelChannels(source));
        pixels[0]*=alpha[0];
        pixels[1]*=alpha[1];
        pixels[2]*=alpha[2];
        pixels[3]*=alpha[3];
        gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
          (epsilon.x*alpha[2]+delta.x*alpha[3])));
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(epsilon.y*
          (epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*(epsilon.x*pixels[2]+
          delta.x*pixels[3]))),pixel);
      }
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if (source->alpha_trait != BlendPixelTrait)
          for (j=0; j < 4; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 4; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
            if (channel != AlphaPixelChannel)
              pixels[j]*=alpha[j];
          }
        /*
          Collapse the 2x2 cell: fold rows (or pick one) by the fractional y
          position, then fold columns by x.
        */
        gamma=1.0; /* number of pixels blended together (its variable) */
        for (j=0; j <= 1L; j++)
        {
          if ((y-y_offset) >= 0.75)
            {
              alpha[j]=alpha[j+2]; /* take right pixels */
              pixels[j]=pixels[j+2];
            }
          else
            if ((y-y_offset) > 0.25)
              {
                gamma=2.0; /* blend both pixels in row */
                alpha[j]+=alpha[j+2]; /* add up alpha weights */
                pixels[j]+=pixels[j+2];
              }
        }
        if ((x-x_offset) >= 0.75)
          {
            alpha[0]=alpha[1]; /* take bottom row blend */
            pixels[0]=pixels[1];
          }
        else
          if ((x-x_offset) > 0.25)
            {
              gamma*=2.0; /* blend both rows */
              alpha[0]+=alpha[1]; /* add up alpha weights */
              pixels[0]+=pixels[1];
            }
        if (channel != AlphaPixelChannel)
          gamma=PerceptibleReciprocal(alpha[0]); /* (color) 1/alpha_weights */
        else
          gamma=PerceptibleReciprocal(gamma); /* (alpha) 1/number_of_pixels */
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*pixels[0]),
          pixel);
      }
      break;
    }
    case CatromInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /*
        Catmull-Rom needs a 4x4 neighborhood centered one pixel up-left.
      */
      p=GetCacheViewVirtualPixels(source_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if ((traits & BlendPixelTrait) == 0)
          for (j=0; j < 16; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 16; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=alpha[j]*p[j*GetPixelChannels(source)+i];
          }
        CatromWeights((double) (x-x_offset),&cx);
        CatromWeights((double) (y-y_offset),&cy);
        /*
          NOTE(review): here gamma stays 1.0 when the trait blends (samples
          remain premultiplied), whereas InterpolatePixelChannel() keys the
          same decision on channel == AlphaPixelChannel — confirm intended.
        */
        gamma=((traits & BlendPixelTrait) ? (double) (1.0) :
          PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
          alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
          alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
          alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
          cx[2]*alpha[14]+cx[3]*alpha[15])));
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(cy[0]*(cx[0]*
          pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+cx[3]*pixels[3])+cy[1]*
          (cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*pixels[6]+cx[3]*pixels[7])+
          cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+cx[2]*pixels[10]+cx[3]*
          pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*pixels[13]+cx[2]*
          pixels[14]+cx[3]*pixels[15]))),pixel);
      }
      break;
    }
    case IntegerInterpolatePixel:
    {
      /*
        Truncate to the containing pixel and copy channels straight through.
      */
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],pixel);
      }
      break;
    }
    case NearestInterpolatePixel:
    {
      /*
        Round to the nearest pixel center and copy channels straight through.
      */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],pixel);
      }
      break;
    }
    case MeshInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PointInfo
          delta,
          luminance;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        pixels[0]=(double) p[i];
        pixels[1]=(double) p[GetPixelChannels(source)+i];
        pixels[2]=(double) p[2*GetPixelChannels(source)+i];
        pixels[3]=(double) p[3*GetPixelChannels(source)+i];
        if ((traits & BlendPixelTrait) == 0)
          {
            alpha[0]=1.0;
            alpha[1]=1.0;
            alpha[2]=1.0;
            alpha[3]=1.0;
          }
        else
          {
            alpha[0]=QuantumScale*GetPixelAlpha(source,p);
            alpha[1]=QuantumScale*GetPixelAlpha(source,p+
              GetPixelChannels(source));
            alpha[2]=QuantumScale*GetPixelAlpha(source,p+2*
              GetPixelChannels(source));
            alpha[3]=QuantumScale*GetPixelAlpha(source,p+3*
              GetPixelChannels(source));
          }
        /*
          Split the 2x2 cell into two triangles along the diagonal with the
          smaller luminance difference, then interpolate within the triangle
          that contains the sampling point.
        */
        delta.x=x-x_offset;
        delta.y=y-y_offset;
        luminance.x=fabs((double) (GetPixelLuma(source,p)-
          GetPixelLuma(source,p+3*GetPixelChannels(source))));
        luminance.y=fabs((double) (GetPixelLuma(source,p+
          GetPixelChannels(source))-GetPixelLuma(source,p+2*
          GetPixelChannels(source))));
        if (luminance.x < luminance.y)
          {
            /*
              Diagonal 0-3 NW-SE.
            */
            if (delta.x <= delta.y)
              {
                /*
                  Bottom-left triangle (pixel: 2, diagonal: 0-3).
                */
                delta.y=1.0-delta.y;
                gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[2],pixels[3],pixels[0])),pixel);
              }
            else
              {
                /*
                  Top-right triangle (pixel: 1, diagonal: 0-3).
                */
                delta.x=1.0-delta.x;
                gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[1],pixels[0],pixels[3])),pixel);
              }
          }
        else
          {
            /*
              Diagonal 1-2 NE-SW.
            */
            if (delta.x <= (1.0-delta.y))
              {
                /*
                  Top-left triangle (pixel: 0, diagonal: 1-2).
                */
                gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[0],pixels[1],pixels[2])),pixel);
              }
            else
              {
                /*
                  Bottom-right triangle (pixel: 3, diagonal: 1-2).
                */
                delta.x=1.0-delta.x;
                delta.y=1.0-delta.y;
                gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[3],pixels[2],pixels[1])),pixel);
              }
          }
      }
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        cx[4],
        cy[4];
      /*
        Cubic B-spline smoothing over the same 4x4 neighborhood as Catrom.
      */
      p=GetCacheViewVirtualPixels(source_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if ((traits & BlendPixelTrait) == 0)
          for (j=0; j < 16; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 16; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=alpha[j]*p[j*GetPixelChannels(source)+i];
          }
        SplineWeights((double) (x-x_offset),&cx);
        SplineWeights((double) (y-y_offset),&cy);
        /*
          NOTE(review): same trait-keyed gamma as the Catrom case above —
          differs from InterpolatePixelChannel(); confirm intended.
        */
        gamma=((traits & BlendPixelTrait) ? (double) (1.0) :
          PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
          alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
          alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
          alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
          cx[2]*alpha[14]+cx[3]*alpha[15])));
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(cy[0]*(cx[0]*
          pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+cx[3]*pixels[3])+cy[1]*
          (cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*pixels[6]+cx[3]*pixels[7])+
          cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+cx[2]*pixels[10]+cx[3]*
          pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*pixels[13]+cx[2]*
          pixels[14]+cx[3]*pixels[15]))),pixel);
      }
      break;
    }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t e P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolatePixelInfo() applies a pixel interpolation method between a
% floating point coordinate and the pixels surrounding that coordinate. No
% pixel area resampling, or scaling of the result is performed.
%
% Interpolation is restricted to just RGBKA channels.
%
% The format of the InterpolatePixelInfo method is:
%
% MagickBooleanType InterpolatePixelInfo(const Image *image,
% const CacheView *image_view,const PixelInterpolateMethod method,
% const double x,const double y,PixelInfo *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o image_view: the image view.
%
% o method: the pixel color interpolation method.
%
% o x,y: A double representing the current (x,y) position of the pixel.
%
% o pixel: return the interpolated pixel here.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void AlphaBlendPixelInfo(const Image *image,
  const Quantum *pixel,PixelInfo *pixel_info,double *alpha)
{
  /*
    Load a raw pixel into a PixelInfo with its color channels
    pre-multiplied by the normalized alpha; *alpha receives that weight.
    Images without an alpha trait are treated as fully opaque (weight 1.0,
    which is exact for doubles, so channels are copied unchanged).
  */
  double
    weight;

  weight=1.0;
  if (image->alpha_trait != UndefinedPixelTrait)
    weight=QuantumScale*GetPixelAlpha(image,pixel);
  *alpha=weight;
  pixel_info->red=weight*GetPixelRed(image,pixel);
  pixel_info->green=weight*GetPixelGreen(image,pixel);
  pixel_info->blue=weight*GetPixelBlue(image,pixel);
  pixel_info->black=0.0;
  if (image->colorspace == CMYKColorspace)
    pixel_info->black=weight*GetPixelBlack(image,pixel);
  /* alpha channel itself is stored unweighted */
  pixel_info->alpha=(double) GetPixelAlpha(image,pixel);
}
MagickExport MagickBooleanType InterpolatePixelInfo(const Image *image,
  const CacheView_ *image_view,const PixelInterpolateMethod method,
  const double x,const double y,PixelInfo *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  double
    alpha[16],   /* per-sample alpha weights (up to 4x4 neighbourhood) */
    gamma;

  PixelInfo
    pixels[16];  /* alpha-premultiplied samples (up to 4x4 neighbourhood) */

  const Quantum
    *p;

  ssize_t
    i;

  ssize_t
    x_offset,
    y_offset;

  PixelInterpolateMethod
    interpolate;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  status=MagickTrue;
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  /* UndefinedInterpolatePixel means "use the image's own setting" */
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=image->interpolate;
  GetPixelInfoPixel(image,(const Quantum *) NULL,pixel);
  (void) memset(&pixels,0,sizeof(pixels));
  switch (interpolate)
  {
    case AverageInterpolatePixel:  /* nearest 4 neighbours */
    case Average9InterpolatePixel:  /* nearest 9 neighbours */
    case Average16InterpolatePixel:  /* nearest 16 neighbours */
    {
      ssize_t
        count;

      count=2;  /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          /* odd-sized window: center on the nearest pixel */
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else if (interpolate == Average16InterpolatePixel)
        {
          count=4;
          x_offset--;
          y_offset--;
        }
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count;  /* number of pixels - square of size */
      for (i=0; i < (ssize_t) count; i++)
      {
        /* un-premultiply each sample before accumulating the color sum */
        AlphaBlendPixelInfo(image,p,pixels,alpha);
        gamma=PerceptibleReciprocal(alpha[0]);
        pixel->red+=gamma*pixels[0].red;
        pixel->green+=gamma*pixels[0].green;
        pixel->blue+=gamma*pixels[0].blue;
        pixel->black+=gamma*pixels[0].black;
        pixel->alpha+=pixels[0].alpha;
        p += GetPixelChannels(image);
      }
      gamma=1.0/count;  /* average weighting of each pixel in area */
      pixel->red*=gamma;
      pixel->green*=gamma;
      pixel->blue*=gamma;
      pixel->black*=gamma;
      pixel->alpha*=gamma;
      break;
    }
    case BackgroundInterpolatePixel:
    {
      *pixel=image->background_color;  /* Copy PixelInfo Structure */
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      PointInfo
        delta,
        epsilon;

      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 4L; i++)
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      epsilon.x=1.0-delta.x;
      epsilon.y=1.0-delta.y;
      /* normalize colors by the bilinearly-blended alpha weight */
      gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
        (epsilon.x*alpha[2]+delta.x*alpha[3])));
      gamma=PerceptibleReciprocal(gamma);
      pixel->red=gamma*(epsilon.y*(epsilon.x*pixels[0].red+delta.x*
        pixels[1].red)+delta.y*(epsilon.x*pixels[2].red+delta.x*pixels[3].red));
      pixel->green=gamma*(epsilon.y*(epsilon.x*pixels[0].green+delta.x*
        pixels[1].green)+delta.y*(epsilon.x*pixels[2].green+delta.x*
        pixels[3].green));
      pixel->blue=gamma*(epsilon.y*(epsilon.x*pixels[0].blue+delta.x*
        pixels[1].blue)+delta.y*(epsilon.x*pixels[2].blue+delta.x*
        pixels[3].blue));
      if (image->colorspace == CMYKColorspace)
        pixel->black=gamma*(epsilon.y*(epsilon.x*pixels[0].black+delta.x*
          pixels[1].black)+delta.y*(epsilon.x*pixels[2].black+delta.x*
          pixels[3].black));
      /* weights sum to 1.0 here, so this gamma is effectively unity */
      gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+delta.x)));
      gamma=PerceptibleReciprocal(gamma);
      pixel->alpha=gamma*(epsilon.y*(epsilon.x*pixels[0].alpha+delta.x*
        pixels[1].alpha)+delta.y*(epsilon.x*pixels[2].alpha+delta.x*
        pixels[3].alpha));
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 4L; i++)
      {
        /* NOTE(review): the GetPixelInfoPixel result is immediately
           overwritten by AlphaBlendPixelInfo below - possibly redundant */
        GetPixelInfoPixel(image,p+i*GetPixelChannels(image),pixels+i);
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      }
      gamma=1.0;  /* number of pixels blended together (its variable) */
      for (i=0; i <= 1L; i++)
      {
        if ((y-y_offset) >= 0.75)
          {
            alpha[i]=alpha[i+2];  /* take right pixels */
            pixels[i]=pixels[i+2];
          }
        else
          if ((y-y_offset) > 0.25)
            {
              gamma=2.0;  /* blend both pixels in row */
              alpha[i]+=alpha[i+2];  /* add up alpha weights */
              pixels[i].red+=pixels[i+2].red;
              pixels[i].green+=pixels[i+2].green;
              pixels[i].blue+=pixels[i+2].blue;
              pixels[i].black+=pixels[i+2].black;
              pixels[i].alpha+=pixels[i+2].alpha;
            }
      }
      if ((x-x_offset) >= 0.75)
        {
          alpha[0]=alpha[1];
          pixels[0]=pixels[1];
        }
      else
        if ((x-x_offset) > 0.25)
          {
            gamma*=2.0;  /* blend both rows */
            alpha[0]+= alpha[1];  /* add up alpha weights */
            pixels[0].red+=pixels[1].red;
            pixels[0].green+=pixels[1].green;
            pixels[0].blue+=pixels[1].blue;
            pixels[0].black+=pixels[1].black;
            pixels[0].alpha+=pixels[1].alpha;
          }
      gamma=1.0/gamma;
      alpha[0]=PerceptibleReciprocal(alpha[0]);
      pixel->red=alpha[0]*pixels[0].red;
      pixel->green=alpha[0]*pixels[0].green;  /* divide by sum of alpha */
      pixel->blue=alpha[0]*pixels[0].blue;
      pixel->black=alpha[0]*pixels[0].black;
      pixel->alpha=gamma*pixels[0].alpha;  /* divide by number of pixels */
      break;
    }
    case CatromInterpolatePixel:
    {
      /* Catmull-Rom cubic over a 4x4 neighbourhood */
      double
        cx[4],
        cy[4];

      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 16L; i++)
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      CatromWeights((double) (x-x_offset),&cx);
      CatromWeights((double) (y-y_offset),&cy);
      pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]*
        pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]*
        pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]*
        pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]*
        pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]*
        pixels[14].red+cx[3]*pixels[15].red));
      pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]*
        pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+
        cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+
        cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]*
        pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*
        pixels[12].green+cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*
        pixels[15].green));
      pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]*
        pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]*
        pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]*
        pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]*
        pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+
        cx[2]*pixels[14].blue+cx[3]*pixels[15].blue));
      if (image->colorspace == CMYKColorspace)
        pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]*
          pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+
          cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+
          cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]*
          pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]*
          pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]*
          pixels[15].black));
      pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]*
        pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+
        cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+
        cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]*
        pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+
        cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha));
      break;
    }
    case IntegerInterpolatePixel:
    {
      /* floor(x),floor(y): no interpolation at all */
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      GetPixelInfoPixel(image,p,pixel);
      break;
    }
    case MeshInterpolatePixel:
    {
      /* split the 2x2 cell into two triangles along the diagonal with the
         smaller luminance change, then barycentric-interpolate */
      PointInfo
        delta,
        luminance;

      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      delta.x=x-x_offset;
      delta.y=y-y_offset;
      luminance.x=GetPixelLuma(image,p)-(double)
        GetPixelLuma(image,p+3*GetPixelChannels(image));
      luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double)
        GetPixelLuma(image,p+2*GetPixelChannels(image));
      AlphaBlendPixelInfo(image,p,pixels+0,alpha+0);
      AlphaBlendPixelInfo(image,p+GetPixelChannels(image),pixels+1,alpha+1);
      AlphaBlendPixelInfo(image,p+2*GetPixelChannels(image),pixels+2,alpha+2);
      AlphaBlendPixelInfo(image,p+3*GetPixelChannels(image),pixels+3,alpha+3);
      if (fabs((double) luminance.x) < fabs((double) luminance.y))
        {
          /*
            Diagonal 0-3 NW-SE.
          */
          if (delta.x <= delta.y)
            {
              /*
                Bottom-left triangle (pixel: 2, diagonal: 0-3).
              */
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[2].red,
                pixels[3].red,pixels[0].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[2].green,
                pixels[3].green,pixels[0].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[2].blue,
                pixels[3].blue,pixels[0].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[2].black,
                  pixels[3].black,pixels[0].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[2].alpha,
                pixels[3].alpha,pixels[0].alpha);
            }
          else
            {
              /*
                Top-right triangle (pixel:1 , diagonal: 0-3).
              */
              delta.x=1.0-delta.x;
              gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[1].red,
                pixels[0].red,pixels[3].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[1].green,
                pixels[0].green,pixels[3].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[1].blue,
                pixels[0].blue,pixels[3].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[1].black,
                  pixels[0].black,pixels[3].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[1].alpha,
                pixels[0].alpha,pixels[3].alpha);
            }
        }
      else
        {
          /*
            Diagonal 1-2 NE-SW.
          */
          if (delta.x <= (1.0-delta.y))
            {
              /*
                Top-left triangle (pixel: 0, diagonal: 1-2).
              */
              gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[0].red,
                pixels[1].red,pixels[2].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[0].green,
                pixels[1].green,pixels[2].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[0].blue,
                pixels[1].blue,pixels[2].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[0].black,
                  pixels[1].black,pixels[2].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[0].alpha,
                pixels[1].alpha,pixels[2].alpha);
            }
          else
            {
              /*
                Bottom-right triangle (pixel: 3, diagonal: 1-2).
              */
              delta.x=1.0-delta.x;
              delta.y=1.0-delta.y;
              gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
              gamma=PerceptibleReciprocal(gamma);
              pixel->red=gamma*MeshInterpolate(&delta,pixels[3].red,
                pixels[2].red,pixels[1].red);
              pixel->green=gamma*MeshInterpolate(&delta,pixels[3].green,
                pixels[2].green,pixels[1].green);
              pixel->blue=gamma*MeshInterpolate(&delta,pixels[3].blue,
                pixels[2].blue,pixels[1].blue);
              if (image->colorspace == CMYKColorspace)
                pixel->black=gamma*MeshInterpolate(&delta,pixels[3].black,
                  pixels[2].black,pixels[1].black);
              gamma=MeshInterpolate(&delta,1.0,1.0,1.0);
              pixel->alpha=gamma*MeshInterpolate(&delta,pixels[3].alpha,
                pixels[2].alpha,pixels[1].alpha);
            }
        }
      break;
    }
    case NearestInterpolatePixel:
    {
      /* round to the nearest pixel center */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      GetPixelInfoPixel(image,p,pixel);
      break;
    }
    case SplineInterpolatePixel:
    {
      /* cubic B-spline over a 4x4 neighbourhood (smoother than Catrom) */
      double
        cx[4],
        cy[4];

      p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < 16L; i++)
        AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i);
      SplineWeights((double) (x-x_offset),&cx);
      SplineWeights((double) (y-y_offset),&cy);
      pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]*
        pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]*
        pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]*
        pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]*
        pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]*
        pixels[14].red+cx[3]*pixels[15].red));
      pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]*
        pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+
        cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+
        cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]*
        pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*pixels[12].green+
        cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*pixels[15].green));
      pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]*
        pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]*
        pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]*
        pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]*
        pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+
        cx[2]*pixels[14].blue+cx[3]*pixels[15].blue));
      if (image->colorspace == CMYKColorspace)
        pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]*
          pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+
          cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+
          cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]*
          pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]*
          pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]*
          pixels[15].black));
      pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]*
        pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+
        cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+
        cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]*
        pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+
        cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha));
      break;
    }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s F u z z y E q u i v a l e n c e P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsFuzzyEquivalencePixel() returns MagickTrue if the distance between two
% pixels is less than the specified distance in a linear three (or four)
% dimensional color space.
%
% The format of the IsFuzzyEquivalencePixel method is:
%
%      MagickBooleanType IsFuzzyEquivalencePixel(const Image *source,
%        const Quantum *p,const Image *destination,const Quantum *q)
%
% A description of each parameter follows:
%
% o source: the source image.
%
% o p: Pixel p.
%
% o destination: the destination image.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType IsFuzzyEquivalencePixel(const Image *source,
  const Quantum *p,const Image *destination,const Quantum *q)
{
  double
    fuzz,     /* squared fuzz threshold for the combined distance */
    pixel;    /* per-channel difference */

  double
    distance,
    scale;

  fuzz=GetFuzzyColorDistance(source,destination);
  scale=1.0;
  distance=0.0;
  if (source->alpha_trait != UndefinedPixelTrait ||
      destination->alpha_trait != UndefinedPixelTrait)
    {
      /*
        Transparencies are involved - set alpha distance.
      */
      pixel=GetPixelAlpha(source,p)-(double) GetPixelAlpha(destination,q);
      distance=pixel*pixel;
      if (distance > fuzz)
        return(MagickFalse);
      /*
        Generate an alpha scaling factor to generate a 4D cone on colorspace.
        Note that if one color is transparent, distance has no color component.
      */
      if (source->alpha_trait != UndefinedPixelTrait)
        scale=QuantumScale*GetPixelAlpha(source,p);
      if (destination->alpha_trait != UndefinedPixelTrait)
        scale*=QuantumScale*GetPixelAlpha(destination,q);
      /* both effectively transparent: colors are irrelevant, call it equal */
      if (scale <= MagickEpsilon)
        return(MagickTrue);
    }
  /*
    RGB or CMY color cube.
  */
  distance*=3.0;  /* rescale appropriately */
  fuzz*=3.0;
  pixel=GetPixelRed(source,p)-(double) GetPixelRed(destination,q);
  if (IsHueCompatibleColorspace(source->colorspace) != MagickFalse)
    {
      /*
        Compute an arc distance for hue. It should be a vector angle of
        'S'/'W' length with 'L'/'B' forming appropriate cones.
      */
      if (fabs((double) pixel) > (QuantumRange/2))
        pixel-=QuantumRange;
      pixel*=2.0;
    }
  /* early-out after each channel to avoid needless arithmetic */
  distance+=scale*pixel*pixel;
  if (distance > fuzz)
    return(MagickFalse);
  pixel=GetPixelGreen(source,p)-(double) GetPixelGreen(destination,q);
  distance+=scale*pixel*pixel;
  if (distance > fuzz)
    return(MagickFalse);
  pixel=GetPixelBlue(source,p)-(double) GetPixelBlue(destination,q);
  distance+=scale*pixel*pixel;
  if (distance > fuzz)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I s F u z z y E q u i v a l e n c e P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsFuzzyEquivalencePixelInfo() returns true if the distance between two
% colors is less than the specified distance in a linear three (or four)
% dimensional color space.
%
% This implements the equivalent of:
% fuzz < sqrt(color_distance^2 * u.a*v.a + alpha_distance^2)
%
% Which produces a multi-dimensional cone for that colorspace along the
% transparency vector.
%
% For example for an RGB:
% color_distance^2 = ( (u.r-v.r)^2 + (u.g-v.g)^2 + (u.b-v.b)^2 ) / 3
%
% See https://imagemagick.org/Usage/bugs/fuzz_distance/
%
% Hue colorspace distances need more work. Hue is not a distance, it is an
% angle!
%
% A check that q is in the same color space as p should be made and the
% appropriate mapping made. -- Anthony Thyssen 8 December 2010
%
% The format of the IsFuzzyEquivalencePixelInfo method is:
%
% MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p,
% const PixelInfo *q)
%
% A description of each parameter follows:
%
% o p: Pixel p.
%
% o q: Pixel q.
%
*/
MagickExport MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p,
  const PixelInfo *q)
{
  double
    fuzz,     /* squared fuzz threshold */
    pixel;    /* per-channel difference */

  double
    scale,
    distance;

  /* use the larger fuzz of the two colors, floored at sqrt(1/2) */
  fuzz=(double) MagickMax(MagickMax(p->fuzz,q->fuzz),(MagickRealType)
    MagickSQ1_2);
  fuzz*=fuzz;
  scale=1.0;
  distance=0.0;
  if ((p->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        Transparencies are involved - set alpha distance.
      */
      pixel=(p->alpha_trait != UndefinedPixelTrait ? p->alpha : OpaqueAlpha)-
        (q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha);
      distance=pixel*pixel;
      if (distance > fuzz)
        return(MagickFalse);
      /*
        Generate an alpha scaling factor to generate a 4D cone on colorspace.
        If one color is transparent, distance has no color component.
      */
      if (p->alpha_trait != UndefinedPixelTrait)
        scale=(QuantumScale*p->alpha);
      if (q->alpha_trait != UndefinedPixelTrait)
        scale*=(QuantumScale*q->alpha);
      /* both effectively transparent: colors are irrelevant, call it equal */
      if (scale <= MagickEpsilon )
        return(MagickTrue);
    }
  /*
    CMYK create a CMY cube with a multi-dimensional cone toward black.
  */
  if (p->colorspace == CMYKColorspace)
    {
      pixel=p->black-q->black;
      distance+=pixel*pixel*scale;
      if (distance > fuzz)
        return(MagickFalse);
      /* shrink the CMY cone as colors approach black */
      scale*=(double) (QuantumScale*(QuantumRange-p->black));
      scale*=(double) (QuantumScale*(QuantumRange-q->black));
    }
  /*
    RGB or CMY color cube.
  */
  distance*=3.0;  /* rescale appropriately */
  fuzz*=3.0;
  pixel=p->red-q->red;
  if (IsHueCompatibleColorspace(p->colorspace) != MagickFalse)
    {
      /*
        This calculates a arc distance for hue-- it should be a vector
        angle of 'S'/'W' length with 'L'/'B' forming appropriate cones.
        In other words this is a hack - Anthony.
      */
      if (fabs((double) pixel) > (QuantumRange/2))
        pixel-=QuantumRange;
      pixel*=2.0;
    }
  /* early-out after each channel to avoid needless arithmetic */
  distance+=pixel*pixel*scale;
  if (distance > fuzz)
    return(MagickFalse);
  pixel=p->green-q->green;
  distance+=pixel*pixel*scale;
  if (distance > fuzz)
    return(MagickFalse);
  pixel=p->blue-q->blue;
  distance+=pixel*pixel*scale;
  if (distance > fuzz)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelChannelMask() sets the pixel channel map from the specified channel
% mask.
%
% The format of the SetPixelChannelMask method is:
%
% ChannelType SetPixelChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
/*
  Emit a PixelEvent log line per pixel channel: its index, human-readable
  name (colorspace-aware), and trait flags. Debug aid for channel masks.
*/
static void LogPixelChannels(const Image *image)
{
  ssize_t
    i;

  (void) LogMagickEvent(PixelEvent,GetMagickModule(),"%s[%08x]",
    image->filename,image->channel_mask);
  for (i=0; i < (ssize_t) image->number_channels; i++)
  {
    char
      channel_name[MagickPathExtent],
      traits[MagickPathExtent];

    const char
      *name;

    PixelChannel
      channel;

    channel=GetPixelChannelChannel(image,i);
    switch (channel)
    {
      case RedPixelChannel:
      {
        /* channel 0 doubles as cyan (CMYK) or gray (grayscale) */
        name="red";
        if (image->colorspace == CMYKColorspace)
          name="cyan";
        if ((image->colorspace == LinearGRAYColorspace) ||
            (image->colorspace == GRAYColorspace))
          name="gray";
        break;
      }
      case GreenPixelChannel:
      {
        name="green";
        if (image->colorspace == CMYKColorspace)
          name="magenta";
        break;
      }
      case BluePixelChannel:
      {
        name="blue";
        if (image->colorspace == CMYKColorspace)
          name="yellow";
        break;
      }
      case BlackPixelChannel:
      {
        name="black";
        if (image->storage_class == PseudoClass)
          name="index";
        break;
      }
      case IndexPixelChannel:
      {
        name="index";
        break;
      }
      case AlphaPixelChannel:
      {
        name="alpha";
        break;
      }
      case ReadMaskPixelChannel:
      {
        name="read-mask";
        break;
      }
      case WriteMaskPixelChannel:
      {
        name="write-mask";
        break;
      }
      case CompositeMaskPixelChannel:
      {
        name="composite-mask";
        break;
      }
      case MetaPixelChannel:
      {
        name="meta";
        break;
      }
      default:
        name="undefined";
    }
    /* with no colorspace, names are meaningless: log the numeric channel */
    if (image->colorspace == UndefinedColorspace)
      {
        (void) FormatLocaleString(channel_name,MagickPathExtent,"%.20g",
          (double) channel);
        name=(const char *) channel_name;
      }
    *traits='\0';
    if ((GetPixelChannelTraits(image,channel) & UpdatePixelTrait) != 0)
      (void) ConcatenateMagickString(traits,"update,",MagickPathExtent);
    if ((GetPixelChannelTraits(image,channel) & BlendPixelTrait) != 0)
      (void) ConcatenateMagickString(traits,"blend,",MagickPathExtent);
    if ((GetPixelChannelTraits(image,channel) & CopyPixelTrait) != 0)
      (void) ConcatenateMagickString(traits,"copy,",MagickPathExtent);
    if (*traits == '\0')
      (void) ConcatenateMagickString(traits,"undefined,",MagickPathExtent);
    traits[strlen(traits)-1]='\0';  /* strip the trailing comma */
    (void) LogMagickEvent(PixelEvent,GetMagickModule(),"  %.20g: %s (%s)",
      (double) i,name,traits);
  }
}
MagickExport ChannelType SetPixelChannelMask(Image *image,
  const ChannelType channel_mask)
{
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)

  ChannelType
    mask;  /* previous mask, returned to the caller */

  ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(PixelEvent,GetMagickModule(),"%s[%08x]",
      image->filename,channel_mask);
  mask=image->channel_mask;
  image->channel_mask=channel_mask;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    /* channels excluded from the mask are preserved verbatim (copy) */
    if (GetChannelBit(channel_mask,channel) == 0)
      {
        SetPixelChannelTraits(image,channel,CopyPixelTrait);
        continue;
      }
    if (channel == AlphaPixelChannel)
      {
        /* alpha never blends with itself: update or copy only */
        if ((image->alpha_trait & CopyPixelTrait) != 0)
          {
            SetPixelChannelTraits(image,channel,CopyPixelTrait);
            continue;
          }
        SetPixelChannelTraits(image,channel,UpdatePixelTrait);
        continue;
      }
    /* color channels blend with alpha when the image has an alpha trait */
    if (image->alpha_trait != UndefinedPixelTrait)
      {
        SetPixelChannelTraits(image,channel,(const PixelTrait)
          (UpdatePixelTrait | BlendPixelTrait));
        continue;
      }
    SetPixelChannelTraits(image,channel,UpdatePixelTrait);
  }
  /* indexes and mask channels are always copy-only */
  if (image->storage_class == PseudoClass)
    SetPixelChannelTraits(image,IndexPixelChannel,CopyPixelTrait);
  if ((image->channels & ReadMaskChannel) != 0)
    SetPixelChannelTraits(image,ReadMaskPixelChannel,CopyPixelTrait);
  if ((image->channels & WriteMaskChannel) != 0)
    SetPixelChannelTraits(image,WriteMaskPixelChannel,CopyPixelTrait);
  if ((image->channels & CompositeMaskChannel) != 0)
    SetPixelChannelTraits(image,CompositeMaskPixelChannel,CopyPixelTrait);
  if (image->debug != MagickFalse)
    LogPixelChannels(image);
  return(mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l M e t a C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelMetaChannels() sets the image meta channels.
%
% The format of the SetPixelMetaChannels method is:
%
% MagickBooleanType SetPixelMetaChannels(Image *image,
% const size_t number_meta_channels,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_meta_channels: the number of meta channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetPixelMetaChannels(Image *image,
  const size_t number_meta_channels,ExceptionInfo *exception)
{
  /*
    Set the number of meta channels, rebuild the pixel channel map, and
    re-synchronize the pixel cache to the new channel layout.  Returns the
    status of the cache synchronization.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image->number_meta_channels=number_meta_channels;
  InitializePixelChannelMap(image);
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o r t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortImagePixels() sorts pixels within each scanline in ascending order of
% intensity.
%
% The format of the SortImagePixels method is:
%
% MagickBooleanType SortImagePixels(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SortImagePixels(Image *image,
  ExceptionInfo *exception)
{
  /* fix: progress tag was copy-pasted from SolarizeImage ("Solarize/Image"),
     mislabelling progress-monitor callbacks for this operation */
#define SortImagePixelsTag  "SortPixels/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Sort image pixels: bubble-sort each scanline independently, in
    ascending order of pixel intensity.  Rows are processed in parallel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns-1; x++)
    {
      MagickRealType
        current,
        previous;

      ssize_t
        j;

      /* one bubble pass: the brightest pixel of q[0..columns-x-1]
         floats to the right end of the unsorted region */
      previous=GetPixelIntensity(image,q);
      for (j=0; j < (ssize_t) (image->columns-x-1); j++)
      {
        current=GetPixelIntensity(image,q+(j+1)*GetPixelChannels(image));
        if (previous > current)
          {
            Quantum
              pixel[MaxPixelChannels];

            /*
              Swap adjacent pixels.
            */
            (void) memcpy(pixel,q+j*GetPixelChannels(image),
              GetPixelChannels(image)*sizeof(Quantum));
            (void) memcpy(q+j*GetPixelChannels(image),q+(j+1)*
              GetPixelChannels(image),GetPixelChannels(image)*sizeof(Quantum));
            (void) memcpy(q+(j+1)*GetPixelChannels(image),pixel,
              GetPixelChannels(image)*sizeof(Quantum));
          }
        else
          previous=current;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SortImagePixelsTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GB_unop__trunc_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__trunc_fc32_fc32)
// op(A') function: GB (_unop_tran__trunc_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_ctruncf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_ctruncf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_ctruncf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply GB_ctruncf to every (present) entry of A.
// Dense/full case iterates all anz entries; bitmap case skips holes.
GrB_Info GB (_unop_apply__trunc_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_ctruncf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_ctruncf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying GB_ctruncf to each entry.
// The transpose algorithm itself lives in the shared GB_unop_transpose.c
// template, which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__trunc_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-thread transpose workspaces
    const int64_t *restrict A_slice, // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
compton.c | // Licensed under a 3-clause BSD style license - see LICENSE
#include "common.h"
#include "stdlib.h"
/*
  Inverse-Compton scattering kernel with Klein-Nishina corrections.
  Presumably: epsilon = seed photon energy, gamma = electron Lorentz
  factor, nu = scattered photon frequency -- TODO confirm units against
  the definitions of h, M_e, c, sigma_T in common.h.
  Returns 0 outside the kinematically allowed range (q outside (0,1])
  or when the expression is not finite.
*/
double KN(double epsilon, double gamma, double nu){
    double epsilon1 = nu*h;                    /* scattered photon energy */
    double gamma_e = 4*epsilon*gamma/(M_e*c*c);
    double q = epsilon1/(gamma_e*(gamma*M_e*c*c - epsilon1));
    double res = 3*sigma_T*c/(4*gamma*gamma*epsilon) *(
                     2*q*log(q) +
                     (1+2*q)*(1-q) +
                     (gamma_e*q)*(gamma_e*q)*(1-q)/
                     (2*(1 + gamma_e*q))
                 );
    /* order matters: q <= 0 makes log(q) NaN, caught by the isnan check */
    if (isnan(res)) return 0;
    if (q > 1) return 0;
    if (res < 0) return 0;
    return res;
}
// Inverse-Compton emissivity at a single scattered frequency `nu`.
// Double trapezoidal integration: inner integral over seed-photon energies
// `epsilon` (density n_ph), outer integral over electron Lorentz factors
// `gamma` (distribution e_dist).  The per-gamma inner integrals are
// evaluated in parallel with OpenMP.
//
// Returns h^2 * nu * integral / (4*pi); returns NAN if the scratch buffer
// cannot be allocated (the original dereferenced an unchecked malloc).
double compton_emissivity_s(double *epsilon, double *gamma,
                            double nu, double *n_ph, double *e_dist,
                            int eps_sz, int gamma_sz){
    double *tmp = malloc(gamma_sz * sizeof *tmp);
    if (tmp == NULL) return NAN;   // allocation failure: signal, don't crash
    #pragma omp parallel for
    for (int i = 0; i < gamma_sz; i++){
        // trapezoid rule over epsilon for this electron energy; loop-scoped
        // locals replace the shared variables + private() clause of the
        // original, which is equivalent but cannot be mis-shared.
        double sum = 0;
        double last = n_ph[0]*e_dist[i]*KN(epsilon[0], gamma[i], nu);
        for (int j = 0; j < eps_sz-1; j++){
            double next = n_ph[j+1]*e_dist[i]*KN(epsilon[j+1], gamma[i], nu);
            sum += (last+next)*(epsilon[j+1]-epsilon[j])*0.5;
            last = next;
        }
        tmp[i] = sum;
    }
    // outer integral over the electron distribution
    double res = trapz(gamma, tmp, gamma_sz);
    free(tmp);
    return POW2(h)*nu*res/(4*M_PI);
}
// Fill res[0..nu_sz-1] with the inverse-Compton emissivity at each frequency
// in nu[], delegating the per-frequency integration to
// compton_emissivity_s().
void compton_emissivity(double *res, double *epsilon, double *gamma,
                        double *nu, double *n_ph, double *e_dist,
                        int eps_sz, int gamma_sz, int nu_sz){
    for (int k = 0; k < nu_sz; k++) {
        res[k] = compton_emissivity_s(epsilon, gamma, nu[k],
                                      n_ph, e_dist, eps_sz, gamma_sz);
    }
}
|
noarg_target_function.c | #include <stdio.h>
#pragma omp declare target
// Device-compilable helper: prints a fixed message; takes no arguments.
void my_noarg_func(){printf("This is a noarg function \n"); }
#pragma omp end declare target
// Invoke my_noarg_func inside an OpenMP target region (offloaded to a
// device if one is available, otherwise executed on the host fallback).
int main(){
#pragma omp target
my_noarg_func();
return 0;
}
|
GB_binop__second_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp64)
// A*D function (colscale): GB (_AxD__second_fp64)
// D*A function (rowscale): GB (_DxB__second_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: double
// A type: double
// A pattern? 1
// B type: double
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP64 || GxB_NO_SECOND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// With the SECOND operator this stores cij = bij for every entry.
void GB (_Cdense_ewise3_noaccum__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: subassign method 23, driven by
// the GB_* macros above.  B_ek_slicing partitions B's entries into tasks.
GrB_Info GB (_Cdense_accumB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (subassign method 22).
GrB_Info GB (_Cdense_accumb__second_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork, // untyped pointer to the scalar b
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: redundant return as emitted by the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: D is diagonal, so cij = second (aij, djj) = djj.
GrB_Info GB (_AxD__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D, // diagonal matrix of scale factors
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: D is diagonal, so cij = second (dii, bij) = bij.
GrB_Info GB (_DxB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, // diagonal matrix of scale factors
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the SECOND operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B (GxB_eWiseUnion semantics).
GrB_Info GB (_AaddB__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
// typed copies of the union fill values, read by the template
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy requests fmult(y,x); for SECOND, GB_BINOP_FLIP is 0,
// so the unflipped template is always used.
GrB_Info GB (_AemultB_02__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__second_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
FlopCounterFunctor.h | /**
* @file FlopCounterFunctor.h
*
* @date 22 Jan 2018
* @author tchipevn
*/
#pragma once
#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/utils/ArrayMath.h"
namespace autopas {
/**
* This class helps in getting the number of performed floating point
* operations. It is a functor that only calculated the amount of floating point
* operations.
* @todo this class currently is limited to the following case:
* - constant cutoff radius
* - constant amount of floating point operations for one kernel call (distance < cutoff)
* @tparam Particle
* @tparam ParticleCell
*/
template <class Particle>
class FlopCounterFunctor : public Functor<Particle, FlopCounterFunctor<Particle>> {
 public:
  bool isRelevantForTuning() override { return false; }
  bool allowsNewton3() override { return true; }
  bool allowsNonNewton3() override { return true; }
  /**
   * constructor of FlopCounterFunctor
   * @param cutoffRadius the cutoff radius
   */
  // Fix: the constructor was declared as `FlopCounterFunctor<Particle>(...)`;
  // naming a constructor with the template-id is ill-formed in C++20
  // (CWG 2237), so the injected-class-name is used instead.
  explicit FlopCounterFunctor(double cutoffRadius)
      : autopas::Functor<Particle, FlopCounterFunctor<Particle>>(cutoffRadius),
        _cutoffSquare(cutoffRadius * cutoffRadius),
        _distanceCalculations(0ul),
        _kernelCalls(0ul) {}
  /**
   * Counts one distance calculation per non-dummy pair, and one kernel call
   * if the pair is within the cutoff.
   */
  void AoSFunctor(Particle &i, Particle &j, bool /*newton3*/) override {
    if (i.isDummy() or j.isDummy()) {
      return;
    }
    auto dr = utils::ArrayMath::sub(i.getR(), j.getR());
    double dr2 = utils::ArrayMath::dot(dr, dr);
    _distanceCalculations.fetch_add(1, std::memory_order_relaxed);
    if (dr2 <= _cutoffSquare) {
      _kernelCalls.fetch_add(1, std::memory_order_relaxed);
    }
  }
  /**
   * See Functor::SoAFunctorSingle()
   * @param soa
   */
  void SoAFunctorSingle(SoAView<typename Particle::SoAArraysType> soa, bool /*newton3*/) override {
    if (soa.getNumParticles() == 0) return;
    double *const __restrict x1ptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y1ptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z1ptr = soa.template begin<Particle::AttributeNames::posZ>();
    for (size_t i = 0; i < soa.getNumParticles(); ++i) {
      // per-i accumulators keep the atomic updates out of the simd loop
      size_t distanceCalculationsAcc = 0;
      size_t kernelCallsAcc = 0;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
      for (size_t j = i + 1; j < soa.getNumParticles(); ++j) {
        ++distanceCalculationsAcc;
        const double drx = x1ptr[i] - x1ptr[j];
        const double dry = y1ptr[i] - y1ptr[j];
        const double drz = z1ptr[i] - z1ptr[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        if (dr2 <= _cutoffSquare) {
          ++kernelCallsAcc;
        }
      }
      _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
      _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
    }
  }
  /**
   * See Functor::SoAFunctorPair()
   * @param soa1
   * @param soa2
   */
  void SoAFunctorPair(SoAView<typename Particle::SoAArraysType> soa1, SoAView<typename Particle::SoAArraysType> soa2,
                      bool /*newton3*/) override {
    double *const __restrict x1ptr = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y1ptr = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z1ptr = soa1.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict x2ptr = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y2ptr = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z2ptr = soa2.template begin<Particle::AttributeNames::posZ>();
    for (size_t i = 0; i < soa1.getNumParticles(); ++i) {
      size_t distanceCalculationsAcc = 0;
      size_t kernelCallsAcc = 0;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
      for (size_t j = 0; j < soa2.getNumParticles(); ++j) {
        ++distanceCalculationsAcc;
        const double drx = x1ptr[i] - x2ptr[j];
        const double dry = y1ptr[i] - y2ptr[j];
        const double drz = z1ptr[i] - z2ptr[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        if (dr2 <= _cutoffSquare) {
          ++kernelCallsAcc;
        }
      }
      _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
      _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
    }
  }
  /**
   * See Functor::SoAFunctorVerlet()
   * @param soa
   * @param indexFirst
   * @param neighborList
   */
  void SoAFunctorVerlet(SoAView<typename Particle::SoAArraysType> soa, const size_t indexFirst,
                        const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
                        bool /*newton3*/) override {
    auto numParts = soa.getNumParticles();
    if (numParts == 0) return;
    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
    const size_t listSizeI = neighborList.size();
    const size_t *const __restrict currentList = neighborList.data();
    // this is a magic number, that should correspond to at least
    // vectorization width*N have testet multiple sizes:
    // 4: small speedup compared to AoS
    // 8: small speedup compared to AoS
    // 12: small but best speedup compared to Aos
    // 16: smaller speedup
    // in theory this is a variable, we could auto-tune over...
#ifdef __AVX512F__
    // use a multiple of 8 for avx
    const size_t vecsize = 16;
#else
    // for everything else 12 is faster
    const size_t vecsize = 12;
#endif
    size_t joff = 0;
    // if the size of the verlet list is larger than the given size vecsize,
    // we will use a vectorized version.
    if (listSizeI >= vecsize) {
      alignas(64) std::array<double, vecsize> xtmp{}, ytmp{}, ztmp{}, xArr{}, yArr{}, zArr{};
      // broadcast of the position of particle i
      for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
        xtmp[tmpj] = xptr[indexFirst];
        ytmp[tmpj] = yptr[indexFirst];
        ztmp[tmpj] = zptr[indexFirst];
      }
      // loop over the verlet list from 0 to x*vecsize
      for (; joff < listSizeI - vecsize + 1; joff += vecsize) {
        size_t distanceCalculationsAcc = 0;
        size_t kernelCallsAcc = 0;
        // in each iteration we calculate the interactions of particle i with
        // vecsize particles in the neighborlist of particle i starting at
        // particle joff
        // gather position of particle j
#pragma omp simd safelen(vecsize)
        for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
          xArr[tmpj] = xptr[currentList[joff + tmpj]];
          yArr[tmpj] = yptr[currentList[joff + tmpj]];
          zArr[tmpj] = zptr[currentList[joff + tmpj]];
        }
        // do omp simd with reduction of the interaction
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) safelen(vecsize)
        for (size_t j = 0; j < vecsize; j++) {
          ++distanceCalculationsAcc;
          const double drx = xtmp[j] - xArr[j];
          const double dry = ytmp[j] - yArr[j];
          const double drz = ztmp[j] - zArr[j];
          const double drx2 = drx * drx;
          const double dry2 = dry * dry;
          const double drz2 = drz * drz;
          const double dr2 = drx2 + dry2 + drz2;
          // branch-free count keeps the reduction loop vectorizable
          const unsigned long mask = (dr2 <= _cutoffSquare) ? 1 : 0;
          kernelCallsAcc += mask;
        }
        _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
        _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
      }
    }
    size_t distanceCalculationsAcc = 0;
    size_t kernelCallsAcc = 0;
    // this loop goes over the remainder and uses no optimizations
    for (size_t jNeighIndex = joff; jNeighIndex < listSizeI; ++jNeighIndex) {
      size_t j = neighborList[jNeighIndex];
      if (indexFirst == j) continue;
      ++distanceCalculationsAcc;
      const double drx = xptr[indexFirst] - xptr[j];
      const double dry = yptr[indexFirst] - yptr[j];
      const double drz = zptr[indexFirst] - zptr[j];
      const double drx2 = drx * drx;
      const double dry2 = dry * dry;
      const double drz2 = drz * drz;
      const double dr2 = drx2 + dry2 + drz2;
      if (dr2 <= _cutoffSquare) {
        ++kernelCallsAcc;
      }
    }
    _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
    _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
  }
  /**
   * @copydoc Functor::getNeededAttr()
   */
  constexpr static std::array<typename Particle::AttributeNames, 3> getNeededAttr() {
    return std::array<typename Particle::AttributeNames, 3>{
        Particle::AttributeNames::posX, Particle::AttributeNames::posY, Particle::AttributeNames::posZ};
  }
  /**
   * @copydoc Functor::getNeededAttr(std::false_type)
   */
  constexpr static std::array<typename Particle::AttributeNames, 3> getNeededAttr(std::false_type) {
    return getNeededAttr();
  }
  /**
   * @copydoc Functor::getComputedAttr()
   */
  constexpr static std::array<typename Particle::AttributeNames, 0> getComputedAttr() {
    return std::array<typename Particle::AttributeNames, 0>{/*Nothing*/};
  }
  /**
   * get the hit rate of the pair-wise interaction, i.e. the ratio of the number
   * of kernel calls compared to the number of distance calculations
   * @return the hit rate (NaN if no distance calculation has been counted yet)
   */
  // now const: the method only reads the atomic counters
  [[nodiscard]] double getHitRate() const {
    return static_cast<double>(_kernelCalls) / static_cast<double>(_distanceCalculations);
  }
  /**
   * get the total number of flops
   * @param numFlopsPerKernelCall
   * @return
   */
  [[nodiscard]] size_t getFlops(size_t numFlopsPerKernelCall) const {
    const auto distFlops = numFlopsPerDistanceCalculation * _distanceCalculations;
    const auto kernFlops = numFlopsPerKernelCall * _kernelCalls;
    return distFlops + kernFlops;
  }
  /**
   * get the number of calculated distance operations
   * @return
   */
  [[nodiscard]] size_t getDistanceCalculations() const { return _distanceCalculations; }
  /**
   * get the number of kernel calls, i.e. the number of pairs of particles with
   * a distance not larger than the cutoff
   * @return
   */
  [[nodiscard]] size_t getKernelCalls() const { return _kernelCalls; }
  /**
   * number of flops for one distance calculation.
   * 3 sub + 3 square + 2 add
   */
  static constexpr double numFlopsPerDistanceCalculation = 8.0;

 private:
  // squared cutoff radius, compared against squared distances
  double _cutoffSquare;
  // global counters; updated with relaxed atomics since only totals matter
  std::atomic<size_t> _distanceCalculations, _kernelCalls;
};
} // namespace autopas
|
GB_binop__islt_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint16)
// A*D function (colscale): GB (_AxD__islt_uint16)
// D*A function (rowscale): GB (_DxB__islt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint16)
// C=scalar+B GB (_bind1st__islt_uint16)
// C=scalar+B' GB (_bind1st_tran__islt_uint16)
// C=A+scalar GB (_bind2nd__islt_uint16)
// C=A'+scalar GB (_bind2nd_tran__islt_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (ISLT: cij = (aij < bij)).
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse (subassign method 23).
GrB_Info GB (_Cdense_accumB__islt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (subassign method 22).
GrB_Info GB (_Cdense_accumb__islt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork, // untyped pointer to the scalar b
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: redundant return as emitted by the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D: cij = (aij < djj).
GrB_Info GB (_AxD__islt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern, // A's values ignored if pattern-only
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D: cij = (dii < bij).
GrB_Info GB (_DxB__islt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern, // D's values ignored if pattern-only
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the ISLT operator.
GrB_Info GB (_AaddB__islt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B.
GrB_Info GB (_AemultB_01__islt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB(_AemultB_02__islt_uint16): eWiseMult, C<#> = A.*B where A is
// sparse/hypersparse and B is bitmap/full. GB_BINOP_FLIP selects at compile
// time whether a flipped variant of the operator must be emulated at run time.
GrB_Info GB (_AemultB_02__islt_uint16)
(
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A, // sparse/hypersparse input
const GrB_Matrix B, // bitmap/full input
const bool flipxy, // if true, compute z = f(y,x) instead of f(x,y)
const int64_t *restrict Cp_kfirst, // first entry of C in each task's slice
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// GB(_AemultB_03__islt_uint16): eWiseMult, C<M> = A.*B, where M is
// sparse/hypersparse and both A and B are bitmap/full. The mask drives the
// iteration; work is done by GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__islt_uint16)
(
GrB_Matrix C, // output matrix
const GrB_Matrix M, // sparse/hyper mask, drives the iteration
const bool Mask_struct,
const GrB_Matrix A, // bitmap/full input
const GrB_Matrix B, // bitmap/full input
const int64_t *restrict Cp_kfirst, // first entry of C in each task's slice
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// GB(_AemultB_bitmap__islt_uint16): eWiseMult where the output C is held in
// bitmap form: C=A.*B, C<M>=A.*B, or C<!M>=A.*B. Work is done by the
// bitmap emult template, specialized by this file's operator macros.
GrB_Info GB (_AemultB_bitmap__islt_uint16)
(
GrB_Matrix C, // output matrix, bitmap format
const int ewise_method, // which emult sub-method the caller chose
const GrB_Matrix M, // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// GB(_bind1st__islt_uint16): apply the ISLT uint16 operator with the scalar
// bound as its FIRST argument: Cx [k] = (x < Bx [k]) for every entry of B
// that is present according to the bitmap Bb (Bb == NULL means all present).
GrB_Info GB (_bind1st__islt_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
uint16_t *Bx = (uint16_t *) Bx_input ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in B's bitmap are computed
if (GBB (Bb, k))
{
uint16_t bkj = Bx [k] ;
Cx [k] = (x < bkj) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// GB(_bind2nd__islt_uint16): apply the ISLT uint16 operator with the scalar
// bound as its SECOND argument: Cx [k] = (Ax [k] < y) for every entry of A
// that is present according to the bitmap Ab (Ab == NULL means all present).
GrB_Info GB (_bind2nd__islt_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t y = (*((uint16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in A's bitmap are computed
if (GBB (Ab, k))
{
uint16_t akj = Ax [k] ;
Cx [k] = (akj < y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB(_bind1st_tran__islt_uint16): C = op (x, A'): transpose A while applying
// the ISLT operator with the scalar x bound as the first argument.
// GB_CAST_OP is redefined here so the shared transpose template computes
// cij = (x < aij) (no typecast despite the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_uint16)
(
GrB_Matrix C,
const GB_void *x_input, // the bound scalar, as an untyped pointer
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-thread workspaces for the transpose
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent kernels in this generated file
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB(_bind2nd_tran__islt_uint16): C = op (A', y): transpose A while applying
// the ISLT operator with the scalar y bound as the second argument.
// GB_CAST_OP is redefined here so the shared transpose template computes
// cij = (aij < y) (no typecast despite the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input, // the bound scalar, as an untyped pointer
int64_t *restrict *Workspaces, // per-thread workspaces for the transpose
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
serial_tree_learner.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
namespace LightGBM {
using json11::Json;
/*! \brief forward declaration */
class CostEfficientGradientBoosting;
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
friend CostEfficientGradientBoosting;
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
/*! \brief Initialize internal state from the training data before the first iteration */
void Init(const Dataset* train_data, bool is_constant_hessian) override;
/*! \brief Replace training data, always resetting the multi-value bin */
void ResetTrainingData(const Dataset* train_data,
bool is_constant_hessian) override {
ResetTrainingDataInner(train_data, is_constant_hessian, true);
}
void ResetIsConstantHessian(bool is_constant_hessian) override {
share_state_->is_constant_hessian = is_constant_hessian;
}
virtual void ResetTrainingDataInner(const Dataset* train_data,
bool is_constant_hessian,
bool reset_multi_val_bin);
void ResetConfig(const Config* config) override;
/*! \brief Store (or clear, if null) the JSON describing user-forced splits */
inline void SetForcedSplit(const Json* forced_split_json) override {
if (forced_split_json != nullptr && !forced_split_json->is_null()) {
forced_split_json_ = forced_split_json;
} else {
forced_split_json_ = nullptr;
}
}
/*! \brief Learn one tree from the given per-row gradients and hessians */
Tree* Train(const score_t* gradients, const score_t *hessians) override;
/*! \brief Refit an existing tree structure with new gradients/hessians */
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
/*! \brief Set the bagged subset: either an index view over the full data (subset == nullptr)
* or a materialized subset dataset */
void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
if (subset == nullptr) {
data_partition_->SetUsedDataIndices(used_indices, num_data);
share_state_->is_use_subrow = false;
} else {
// keep multi-value bin: only the row subset changed, not the features
ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
share_state_->is_use_subrow = true;
share_state_->is_subrow_copied = false;
share_state_->bagging_use_indices = used_indices;
share_state_->bagging_indices_cnt = num_data;
}
}
/*! \brief Add each leaf's output to the scores of the rows currently assigned to it */
void AddPredictionToScore(const Tree* tree,
double* out_score) const override {
if (tree->num_leaves() <= 1) {
return;
}
CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
/*! \brief Get output of parent node, used for path smoothing */
double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;
protected:
void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_,
int feature_index, int real_fidx,
bool is_feature_used, int num_data,
const LeafSplits* leaf_splits,
SplitInfo* best_split, double parent_output);
void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);
void RecomputeBestSplitForLeaf(int leaf, SplitInfo* split);
/*!
* \brief Some initial works before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial works before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void FindBestSplits(const Tree* tree);
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree*);
/*!
* \brief Partition tree and data according best split.
* \param tree Current tree, will be splitted on this function.
* \param best_leaf The index of leaf that will be splitted.
* \param left_leaf The index of left leaf after splitted.
* \param right_leaf The index of right leaf after splitted.
*/
inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf,
int* right_leaf) {
SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
}
void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
bool update_cnt);
/* Force splits with forced_split_json dict and then return num splits forced.*/
int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf,
int* cur_depth);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores minimum and maximum constraints for each leaf */
std::unique_ptr<LeafConstraintsBase> constraints_;
/*! \brief stores best thresholds for all feature for smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
// NOTE(review): `#elif USE_CUDA` relies on USE_CUDA being defined with a
// nonzero value; `#elif defined(USE_CUDA)` would be more robust — confirm
// how the build system defines it before changing.
#elif USE_CUDA
/*! \brief gradients of current iteration, ordered for cache optimized */
std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized */
std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimized */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
/*! \brief used to cache historical histogram to speed up*/
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
/*! \brief per-tree / per-node feature (column) sampler */
ColSampler col_sampler_;
/*! \brief user-provided forced-split description; nullptr if none */
const Json* forced_split_json_;
std::unique_ptr<TrainingShareStates> share_state_;
/*! \brief cost-efficient gradient boosting helper; may be unset */
std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};
// Number of training rows currently assigned to leaf `leaf_idx`.
// A negative index denotes "no leaf" and contributes zero rows.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
if (leaf_idx < 0) {
return 0;
}
return data_partition_->leaf_count(leaf_idx);
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform int8 3x3 convolution kernels into the Winograd F(2,3) domain
// (U = G * g * G^T, a 4x4 tile per kernel) and repack them into the
// interleaved layout consumed by the NEON dot-product loops of
// conv3x3s1_winograd23_int8_neon: one Mat per tile position r (4 total),
// with output channels grouped by 8, then 4, then singly.
static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
// 16 shorts (4x4 transformed tile) per (inch, outch) kernel; elemsize 2 bytes
Mat kernel_tm(4 * 4, inch, outch, 2ul);
// G
const short ktm[4][3] = {
{2, 0, 0},
{1, 1, 1},
{1, -1, 1},
{0, 0, 2}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
// raw 3x3 int8 kernel for (outch p, inch q), laid out row-major
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
// tmp = G * g : each row of tmp is one row of the 4x3 intermediate
short tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
// U = tmp * G^T, stored as 16 shorts: kernel_tm0[j*4 + i] = U(j,i)
for (int j = 0; j < 4; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// Repack: for each of the 4 tile rows r, gather the 4 shorts of row r from
// every (outch, inch) tile, interleaving 8 (then 4, then 1) output channels
// so the compute loops can load contiguous blocks.
for (int r = 0; r < 4; r++)
{
// channels: one per 8-group, one per leftover 4-group, one per single
Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, 2u);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16;
const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16;
const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16;
const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16;
const short* kernel4 = (const short*)kernel_tm + (p + 4) * inch * 16;
const short* kernel5 = (const short*)kernel_tm + (p + 5) * inch * 16;
const short* kernel6 = (const short*)kernel_tm + (p + 6) * inch * 16;
const short* kernel7 = (const short*)kernel_tm + (p + 7) * inch * 16;
short* ktmp = kernel_tm_test.channel(p / 8);
for (int q = 0; q < inch; q++)
{
// 32 shorts per input channel: row r of the tile for 8 output channels
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
// advance each source pointer to the next input channel's 4x4 tile
kernel0 += 16;
kernel1 += 16;
kernel2 += 16;
kernel3 += 16;
kernel4 += 16;
kernel5 += 16;
kernel6 += 16;
kernel7 += 16;
}
}
// leftover groups of 4 output channels
for (; p + 3 < outch; p += 4)
{
const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16;
const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16;
const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16;
const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16;
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch; q++)
{
// 16 shorts per input channel: row r for 4 output channels
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 16;
kernel1 += 16;
kernel2 += 16;
kernel3 += 16;
}
}
// remaining single output channels
for (; p < outch; p++)
{
const short* kernel0 = (const short*)kernel_tm + p * inch * 16;
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch; q++)
{
// 4 shorts per input channel: row r for one output channel
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 16;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 4, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// load
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v1.8b}, [%1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.8b}, [%2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v3.8b}, [%3] \n"
// w = B_t * d, trans int8 to int16
"ssubl v4.8h, v0.8b, v2.8b \n" // d4
"saddl v5.8h, v1.8b, v2.8b \n" // d6
"ssubl v6.8h, v2.8b, v1.8b \n" // d8
"ssubl v7.8h, v3.8b, v1.8b \n" // d10
// transpose w to w_t
"trn1 v8.4h, v4.4h, v5.4h \n"
"trn2 v9.4h, v4.4h, v5.4h \n"
"trn1 v10.4h, v6.4h, v7.4h \n"
"trn2 v11.4h, v6.4h, v7.4h \n"
"trn1 v0.2s, v8.2s, v10.2s \n"
"trn2 v2.2s, v8.2s, v10.2s \n"
"trn1 v1.2s, v9.2s, v11.2s \n"
"trn2 v3.2s, v9.2s, v11.2s \n"
// U = B_t * d_t
"sub v4.4h, v0.4h, v2.4h \n"
"add v5.4h, v1.4h, v2.4h \n"
"sub v6.4h, v2.4h, v1.4h \n"
"sub v7.4h, v3.4h, v1.4h \n"
// save
"st1 {v4.4h}, [%4] \n"
"st1 {v5.4h}, [%5] \n"
"st1 {v6.4h}, [%6] \n"
"st1 {v7.4h}, [%7] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(out_tm0), // %4
"=r"(out_tm1), // %5
"=r"(out_tm2), // %6
"=r"(out_tm3) // %7
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"4"(out_tm0),
"5"(out_tm1),
"6"(out_tm2),
"7"(out_tm3)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else
asm volatile(
// load
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"pld [%1, #64] \n"
"vld1.s8 {d1}, [%1] \n"
"pld [%2, #64] \n"
"vld1.s8 {d2}, [%2] \n"
"pld [%3, #64] \n"
"vld1.s8 {d3}, [%3] \n"
// w = B_t * d, trans int8 to int16
"vsubl.s8 q2, d0, d2 \n" // d4
"vaddl.s8 q3, d1, d2 \n" // d6
"vsubl.s8 q4, d2, d1 \n" // d8
"vsubl.s8 q5, d3, d1 \n" // d10
// transpose w to w_t
"vtrn.s16 d4, d6 \n"
"vtrn.s16 d8, d10 \n"
"vtrn.s32 d4, d8 \n"
"vtrn.s32 d6, d10 \n"
// U = B_t * d_t
"vsub.s16 d11, d4, d8 \n"
"vadd.s16 d12, d6, d8 \n"
"vsub.s16 d13, d8, d6 \n"
"vsub.s16 d14, d10, d6 \n"
// save
"vst1.s32 {d11}, [%4] \n"
"vst1.s32 {d12}, [%5] \n"
"vst1.s32 {d13}, [%6] \n"
"vst1.s32 {d14}, [%7] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(out_tm0), // %4
"=r"(out_tm1), // %5
"=r"(out_tm2), // %6
"=r"(out_tm3) // %7
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"4"(out_tm0),
"5"(out_tm1),
"6"(out_tm2),
"7"(out_tm3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#endif // __aarch64__
#else
short d0[4], d1[4], d2[4], d3[4];
short w0[4], w1[4], w2[4], w3[4];
short t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm1[n] = d1[n];
out_tm2[n] = d2[n];
out_tm3[n] = d3[n];
}
#endif
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 4; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 16;
output1_tm += 16;
output2_tm += 16;
output3_tm += 16;
output4_tm += 16;
output5_tm += 16;
output6_tm += 16;
output7_tm += 16;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 16;
output1_tm += 16;
output2_tm += 16;
output3_tm += 16;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
//"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%1] \n"
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN
int nRowBlocks = w_tm / 4;
#if __ARM_NEON
int32x2_t _shift = vdup_n_s32(-2);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
int* outRow0 = top_blob_bordered.channel(p);
int* outRow1 = outRow0 + outw;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2;
"sub v1.4s, v1.4s, v2.4s \n"
"add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3;
"add v1.4s, v1.4s, v3.4s \n"
"trn1 v4.4s, v0.4s, v1.4s \n"
"trn2 v5.4s, v0.4s, v1.4s \n"
"dup v6.2d, v4.d[1] \n"
"dup v7.2d, v5.d[1] \n"
"add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2;
"sub v1.2s, v5.2s, v6.2s \n"
"add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3;
"add v1.2s, v1.2s, v7.2s \n"
"sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2
"sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2
"st1 {v0.2s}, [%1], #8 \n"
"st1 {v1.2s}, [%2], #8 \n"
: "=r"(out_tile), // %0
"=r"(outRow0), // %1
"=r"(outRow1) // %2
: "0"(out_tile),
"1"(outRow0),
"2"(outRow1),
"w"(_shift) // %6
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2;
"vsubq.s32 q1, q1, q2 \n"
"vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3;
"vaddq.s32 q1, q1, q3 \n"
"vtrn.s32 q0, q1 \n"
"vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2;
"vsub.s32 d9, d2, d1 \n"
"vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3;
"vadd.s32 d9, d9, d3 \n"
"vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2
"vshl.s32 d9, d9, %P6 \n" // o1 = o1 >> 2
"vst1.s32 {d8}, [%1]! \n"
"vst1.s32 {d9}, [%2]! \n"
: "=r"(out_tile), // %0
"=r"(outRow0), // %1
"=r"(outRow1) // %2
: "0"(out_tile),
"1"(outRow0),
"2"(outRow1),
"w"(_shift) // %6
: "cc", "memory", "q0", "q1", "q2", "q3", "q4");
#endif // __aarch64__
#else
int s0[4], s1[4], s2[4], s3[4];
int w0[4], w1[4];
int d0[2], d1[2], d2[2], d3[2];
int o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// save to top blob tm,why right 2,because the G' = G*2
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
out_tile += 16;
outRow0 += 2;
outRow1 += 2;
#endif // __ARM_NEON
}
outRow0 += outw;
outRow1 += outw;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
    // Transform each 3x3 int8 kernel into its 6x6 Winograd F(4,3) domain
    // representation (U = G * g * G^T), stored as int16, then repack the
    // transformed coefficients for the dot-product stage: one packed Mat per
    // coefficient group r (9 groups of 4 of the 36 values), interleaved for
    // 8-, 4- and 1-channel output unrolling.
    Mat kernel_tm(6 * 6, inch, outch, 2ul);

    // G, scaled to integers. Float reference:
    // {  1/4,     0,    0 }
    // { -1/6,  -1/6, -1/6 }
    // { -1/6,   1/6, -1/6 }
    // { 1/24,  1/12,  1/6 }
    // { 1/24, -1/12,  1/6 }
    // {    0,     0,    1 }
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // raw 3x3 int8 kernel for (output p, input q)
            const signed char* k0 = (const signed char*)kernel + (p * inch + q) * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // h = G * g : apply G to each of the three kernel rows
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                for (int c = 0; c < 3; c++)
                {
                    const signed char* kr = k0 + c * 3; // c-th row of the 3x3 kernel
                    tmp[i][c] = kr[0] * ktm[i][0] + kr[1] * ktm[i][1] + kr[2] * ktm[i][2];
                }
            }

            // U = h * G^T : 6x6 transformed kernel, row-major
            for (int j = 0; j < 6; j++)
            {
                const short* tmpp = tmp[j];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: for each coefficient group r, gather coefficients r*4 .. r*4+3
    // of every (outch, inch) pair, interleaving output channels in blocks of
    // 8, then 4, then singly — matching the consumption order of the
    // NEON dot-product kernels.
    for (int r = 0; r < 9; r++)
    {
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, 2u);

        int p = 0;
        for (; p + 7 < outch; p += 8)
        {
            // 8 output channels interleaved: 32 shorts per input channel
            const short* kptrs[8];
            for (int k = 0; k < 8; k++)
                kptrs[k] = (const short*)kernel_tm.channel(p + k);

            short* ktmp = kernel_tm_test.channel(p / 8);
            for (int q = 0; q < inch; q++)
            {
                for (int k = 0; k < 8; k++)
                {
                    for (int n = 0; n < 4; n++)
                        ktmp[k * 4 + n] = kptrs[k][r * 4 + n];
                    kptrs[k] += 36; // next input channel of this output channel
                }
                ktmp += 32;
            }
        }
        for (; p + 3 < outch; p += 4)
        {
            // 4 output channels interleaved: 16 shorts per input channel
            const short* kptrs[4];
            for (int k = 0; k < 4; k++)
                kptrs[k] = (const short*)kernel_tm.channel(p + k);

            short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                for (int k = 0; k < 4; k++)
                {
                    for (int n = 0; n < 4; n++)
                        ktmp[k * 4 + n] = kptrs[k][r * 4 + n];
                    kptrs[k] += 36;
                }
                ktmp += 16;
            }
        }
        for (; p < outch; p++)
        {
            // leftover single output channel: 4 shorts per input channel
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
            for (int q = 0; q < inch; q++)
            {
                for (int n = 0; n < 4; n++)
                    ktmp[n] = kernel0[r * 4 + n];
                ktmp += 4;
                kernel0 += 36;
            }
        }

        kernel_tm2.push_back(kernel_tm_test);
    }
}
static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q);
short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q);
short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q);
short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q);
short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = vld1_s8(r0);
_d1 = vld1_s8(r1);
_d2 = vld1_s8(r2);
_d3 = vld1_s8(r3);
_d4 = vld1_s8(r4);
_d5 = vld1_s8(r5);
int8x8_t _1_n = vdup_n_s8(-1);
int8x8_t _2_p = vdup_n_s8(2);
int8x8_t _2_n = vdup_n_s8(-2);
int8x8_t _4_p = vdup_n_s8(4);
int8x8_t _4_n = vdup_n_s8(-4);
int8x8_t _5_n = vdup_n_s8(-5);
int16x8_t _1_n_s16 = vdupq_n_s16(-1);
int16x8_t _2_p_s16 = vdupq_n_s16(2);
int16x8_t _2_n_s16 = vdupq_n_s16(-2);
int16x8_t _4_p_s16 = vdupq_n_s16(4);
int16x8_t _4_n_s16 = vdupq_n_s16(-4);
int16x8_t _5_n_s16 = vdupq_n_s16(-5);
// w = B_t * d
_w0 = vmull_s8(_d0, _4_p);
_w0 = vmlal_s8(_w0, _d2, _5_n);
_w0 = vaddw_s8(_w0, _d4);
_w1 = vmull_s8(_d1, _4_n);
_w1 = vmlal_s8(_w1, _d2, _4_n);
_w1 = vaddw_s8(_w1, _d3);
_w1 = vaddw_s8(_w1, _d4);
_w2 = vmull_s8(_d1, _4_p);
_w2 = vmlal_s8(_w2, _d2, _4_n);
_w2 = vmlal_s8(_w2, _d3, _1_n);
_w2 = vaddw_s8(_w2, _d4);
_w3 = vmull_s8(_d1, _2_n);
_w3 = vmlal_s8(_w3, _d2, _1_n);
_w3 = vmlal_s8(_w3, _d3, _2_p);
_w3 = vaddw_s8(_w3, _d4);
_w4 = vmull_s8(_d1, _2_p);
_w4 = vmlal_s8(_w4, _d2, _1_n);
_w4 = vmlal_s8(_w4, _d3, _2_n);
_w4 = vaddw_s8(_w4, _d4);
_w5 = vmull_s8(_d1, _4_p);
_w5 = vmlal_s8(_w5, _d3, _5_n);
_w5 = vaddw_s8(_w5, _d5);
// transpose d to d_t
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
// d = B_t * d_t
_n0 = vmulq_s16(_t0, _4_p_s16);
_n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
_n0 = vaddq_s16(_n0, _t4);
_n1 = vmulq_s16(_t1, _4_n_s16);
_n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
_n1 = vaddq_s16(_n1, _t3);
_n1 = vaddq_s16(_n1, _t4);
_n2 = vmulq_s16(_t1, _4_p_s16);
_n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
_n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
_n2 = vaddq_s16(_n2, _t4);
_n3 = vmulq_s16(_t1, _2_n_s16);
_n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
_n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
_n3 = vaddq_s16(_n3, _t4);
_n4 = vmulq_s16(_t1, _2_p_s16);
_n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
_n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
_n4 = vaddq_s16(_n4, _t4);
_n5 = vmulq_s16(_t1, _4_p_s16);
_n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
_n5 = vaddq_s16(_n5, _t5);
// save to out_tm
out_tm0[0] = _n0[0];
out_tm0[1] = _n0[1];
out_tm0[2] = _n0[2];
out_tm0[3] = _n0[3];
out_tm1[0] = _n0[4];
out_tm1[1] = _n0[5];
out_tm1[2] = _n1[0];
out_tm1[3] = _n1[1];
out_tm2[0] = _n1[2];
out_tm2[1] = _n1[3];
out_tm2[2] = _n1[4];
out_tm2[3] = _n1[5];
out_tm3[0] = _n2[0];
out_tm3[1] = _n2[1];
out_tm3[2] = _n2[2];
out_tm3[3] = _n2[3];
out_tm4[0] = _n2[4];
out_tm4[1] = _n2[5];
out_tm4[2] = _n3[0];
out_tm4[3] = _n3[1];
out_tm5[0] = _n3[2];
out_tm5[1] = _n3[3];
out_tm5[2] = _n3[4];
out_tm5[3] = _n3[5];
out_tm6[0] = _n4[0];
out_tm6[1] = _n4[1];
out_tm6[2] = _n4[2];
out_tm6[3] = _n4[3];
out_tm7[0] = _n4[4];
out_tm7[1] = _n4[5];
out_tm7[2] = _n5[0];
out_tm7[3] = _n5[1];
out_tm8[0] = _n5[2];
out_tm8[1] = _n5[3];
out_tm8[2] = _n5[4];
out_tm8[3] = _n5[5];
#else
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __ARM_NEON
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0);
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else // __ARM_NEON
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __ARM_NEON
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// int* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
// const short* k0 = kernel0_tm.row<short>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
int* outRow0 = top_blob_bordered.channel(p);
int* outRow1 = outRow0 + outw;
int* outRow2 = outRow0 + outw * 2;
int* outRow3 = outRow0 + outw * 3;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
int32x4_t _w0, _w3;
int32x2_t _w0n, _w3n;
int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
int32x4_t _o0, _o1, _o2, _o3;
// load
_s0 = vld1q_s32(out_tile);
_s0n = vld1_s32(out_tile + 4);
_s1 = vld1q_s32(out_tile + 6);
_s1n = vld1_s32(out_tile + 10);
_s2 = vld1q_s32(out_tile + 12);
_s2n = vld1_s32(out_tile + 16);
_s3 = vld1q_s32(out_tile + 18);
_s3n = vld1_s32(out_tile + 22);
_s4 = vld1q_s32(out_tile + 24);
_s4n = vld1_s32(out_tile + 28);
_s5 = vld1q_s32(out_tile + 30);
_s5n = vld1_s32(out_tile + 34);
// w = A_T * W
int32x2_t _tp0 = {1, 4};
int32x2_t _tp1 = {2, 8};
// 4*s5[n]
int32x4_t _s5x4 = vshlq_n_s32(_s5, 2);
int32x2_t _s5x4n = vshl_n_s32(_s5n, 2);
int32x4_t _t1p2 = vaddq_s32(_s1, _s2);
int32x2_t _t1p2n = vadd_s32(_s1n, _s2n);
int32x4_t _t3p4 = vaddq_s32(_s3, _s4);
int32x2_t _t3p4n = vadd_s32(_s3n, _s4n);
int32x4_t _t1s2 = vsubq_s32(_s1, _s2);
int32x2_t _t1s2n = vsub_s32(_s1n, _s2n);
int32x4_t _t3s4 = vsubq_s32(_s3, _s4);
int32x2_t _t3s4n = vsub_s32(_s3n, _s4n);
_w0 = vaddq_s32(_s0, _t1p2);
_w0n = vadd_s32(_s0n, _t1p2n);
_w0 = vaddq_s32(_w0, _t3p4);
_w0n = vadd_s32(_w0n, _t3p4n);
_w0n = vmul_s32(_w0n, _tp0);
// _w2,_w2n
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1);
_t1p2n = vmul_s32(_t1p2n, _tp0);
_w3 = vaddq_s32(_s5x4, _t1s2);
_w3n = vadd_s32(_s5x4n, _t1s2n);
_w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1);
_w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1);
_w3n = vmul_s32(_w3n, _tp0);
// _w1, _w1n
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
_t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0);
_t1s2n = vmul_s32(_t1s2n, _tp0);
int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n);
int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n);
// transpose w to w_t
#if __aarch64__
int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2);
int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2);
int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3);
int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3);
int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
_d0 = vreinterpretq_s32_s64(_dt0);
_d1 = vreinterpretq_s32_s64(_dt1);
_d2 = vreinterpretq_s32_s64(_dt2);
_d3 = vreinterpretq_s32_s64(_dt3);
_d4 = vtrn1q_s32(_w02n, _w13n);
_d5 = vtrn2q_s32(_w02n, _w13n);
#else
asm volatile(
"vtrn.32 %q[_w0], %q[_w1] \n"
"vtrn.32 %q[_w2], %q[_w3] \n"
"vswp %f[_w0], %e[_w2] \n"
"vswp %f[_w1], %e[_w3] \n"
"vtrn.32 %q[_w02n], %q[_w13n] \n"
: [_w0] "+w"(_w0),
[_w1] "+w"(_t1s2),
[_w2] "+w"(_t1p2),
[_w3] "+w"(_w3),
[_w02n] "+w"(_w02n),
[_w13n] "+w"(_w13n)
:
: "cc", "memory");
_d0 = _w0;
_d1 = _t1s2;
_d2 = _t1p2;
_d3 = _w3;
_d4 = _w02n;
_d5 = _w13n;
#endif
// Y = A_T * w_t
_t1p2 = vaddq_s32(_d1, _d2);
_t3p4 = vaddq_s32(_d3, _d4);
_t1s2 = vsubq_s32(_d1, _d2);
_t3s4 = vsubq_s32(_d3, _d4);
_o0 = vaddq_s32(_d0, _t1p2);
_o0 = vaddq_s32(_o0, _t3p4);
// _o2
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_o3 = vaddq_s32(_d5, _t1s2);
_o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1);
// _o1
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
// save to top blob tm
float32x4_t _ot0 = vcvtq_f32_s32(_o0);
float32x4_t _ot1 = vcvtq_f32_s32(_t1s2);
float32x4_t _ot2 = vcvtq_f32_s32(_t1p2);
float32x4_t _ot3 = vcvtq_f32_s32(_o3);
_ot0 = vmulq_n_f32(_ot0, 0.0017361112);
_ot1 = vmulq_n_f32(_ot1, 0.0017361112);
_ot2 = vmulq_n_f32(_ot2, 0.0017361112);
_ot3 = vmulq_n_f32(_ot3, 0.0017361112);
_o0 = vcvtq_s32_f32(_ot0);
_o1 = vcvtq_s32_f32(_ot1);
_o2 = vcvtq_s32_f32(_ot2);
_o3 = vcvtq_s32_f32(_ot3);
vst1q_s32(outRow0, _o0);
vst1q_s32(outRow1, _o1);
vst1q_s32(outRow2, _o2);
vst1q_s32(outRow3, _o3);
#else
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 5; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n];
}
for (int n = 5; n < 6; n++)
{
w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]);
w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]);
w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]);
w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]);
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
#endif // __ARM_NEON
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, std::vector<float> scales_dequant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q);
short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q);
short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q);
short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q);
short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = vld1_s8(r0);
_d1 = vld1_s8(r1);
_d2 = vld1_s8(r2);
_d3 = vld1_s8(r3);
_d4 = vld1_s8(r4);
_d5 = vld1_s8(r5);
int8x8_t _1_n = vdup_n_s8(-1);
int8x8_t _2_p = vdup_n_s8(2);
int8x8_t _2_n = vdup_n_s8(-2);
int8x8_t _4_p = vdup_n_s8(4);
int8x8_t _4_n = vdup_n_s8(-4);
int8x8_t _5_n = vdup_n_s8(-5);
int16x8_t _1_n_s16 = vdupq_n_s16(-1);
int16x8_t _2_p_s16 = vdupq_n_s16(2);
int16x8_t _2_n_s16 = vdupq_n_s16(-2);
int16x8_t _4_p_s16 = vdupq_n_s16(4);
int16x8_t _4_n_s16 = vdupq_n_s16(-4);
int16x8_t _5_n_s16 = vdupq_n_s16(-5);
// w = B_t * d
_w0 = vmull_s8(_d0, _4_p);
_w0 = vmlal_s8(_w0, _d2, _5_n);
_w0 = vaddw_s8(_w0, _d4);
_w1 = vmull_s8(_d1, _4_n);
_w1 = vmlal_s8(_w1, _d2, _4_n);
_w1 = vaddw_s8(_w1, _d3);
_w1 = vaddw_s8(_w1, _d4);
_w2 = vmull_s8(_d1, _4_p);
_w2 = vmlal_s8(_w2, _d2, _4_n);
_w2 = vmlal_s8(_w2, _d3, _1_n);
_w2 = vaddw_s8(_w2, _d4);
_w3 = vmull_s8(_d1, _2_n);
_w3 = vmlal_s8(_w3, _d2, _1_n);
_w3 = vmlal_s8(_w3, _d3, _2_p);
_w3 = vaddw_s8(_w3, _d4);
_w4 = vmull_s8(_d1, _2_p);
_w4 = vmlal_s8(_w4, _d2, _1_n);
_w4 = vmlal_s8(_w4, _d3, _2_n);
_w4 = vaddw_s8(_w4, _d4);
_w5 = vmull_s8(_d1, _4_p);
_w5 = vmlal_s8(_w5, _d3, _5_n);
_w5 = vaddw_s8(_w5, _d5);
// transpose d to d_t
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
// d = B_t * d_t
_n0 = vmulq_s16(_t0, _4_p_s16);
_n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
_n0 = vaddq_s16(_n0, _t4);
_n1 = vmulq_s16(_t1, _4_n_s16);
_n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
_n1 = vaddq_s16(_n1, _t3);
_n1 = vaddq_s16(_n1, _t4);
_n2 = vmulq_s16(_t1, _4_p_s16);
_n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
_n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
_n2 = vaddq_s16(_n2, _t4);
_n3 = vmulq_s16(_t1, _2_n_s16);
_n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
_n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
_n3 = vaddq_s16(_n3, _t4);
_n4 = vmulq_s16(_t1, _2_p_s16);
_n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
_n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
_n4 = vaddq_s16(_n4, _t4);
_n5 = vmulq_s16(_t1, _4_p_s16);
_n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
_n5 = vaddq_s16(_n5, _t5);
// save to out_tm
out_tm0[0] = _n0[0];
out_tm0[1] = _n0[1];
out_tm0[2] = _n0[2];
out_tm0[3] = _n0[3];
out_tm1[0] = _n0[4];
out_tm1[1] = _n0[5];
out_tm1[2] = _n1[0];
out_tm1[3] = _n1[1];
out_tm2[0] = _n1[2];
out_tm2[1] = _n1[3];
out_tm2[2] = _n1[4];
out_tm2[3] = _n1[5];
out_tm3[0] = _n2[0];
out_tm3[1] = _n2[1];
out_tm3[2] = _n2[2];
out_tm3[3] = _n2[3];
out_tm4[0] = _n2[4];
out_tm4[1] = _n2[5];
out_tm4[2] = _n3[0];
out_tm4[3] = _n3[1];
out_tm5[0] = _n3[2];
out_tm5[1] = _n3[3];
out_tm5[2] = _n3[4];
out_tm5[3] = _n3[5];
out_tm6[0] = _n4[0];
out_tm6[1] = _n4[1];
out_tm6[2] = _n4[2];
out_tm6[3] = _n4[3];
out_tm7[0] = _n4[4];
out_tm7[1] = _n4[5];
out_tm7[2] = _n5[0];
out_tm7[3] = _n5[1];
out_tm8[0] = _n5[2];
out_tm8[1] = _n5[3];
out_tm8[2] = _n5[4];
out_tm8[3] = _n5[5];
#else
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __ARM_NEON
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0);
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else // __ARM_NEON
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __ARM_NEON
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// int* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
// const short* k0 = kernel0_tm.row<short>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_dequant0 = scales_dequant[p];
const float scale0 = scale_dequant0 / 576.0;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
int32x4_t _w0, _w3;
int32x2_t _w0n, _w3n;
int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
int32x4_t _o0, _o3;
// load
_s0 = vld1q_s32(out_tile);
_s0n = vld1_s32(out_tile + 4);
_s1 = vld1q_s32(out_tile + 6);
_s1n = vld1_s32(out_tile + 10);
_s2 = vld1q_s32(out_tile + 12);
_s2n = vld1_s32(out_tile + 16);
_s3 = vld1q_s32(out_tile + 18);
_s3n = vld1_s32(out_tile + 22);
_s4 = vld1q_s32(out_tile + 24);
_s4n = vld1_s32(out_tile + 28);
_s5 = vld1q_s32(out_tile + 30);
_s5n = vld1_s32(out_tile + 34);
// w = A_T * W
int32x2_t _tp0 = {1, 4};
int32x2_t _tp1 = {2, 8};
// 4*s5[n]
int32x4_t _s5x4 = vshlq_n_s32(_s5, 2);
int32x2_t _s5x4n = vshl_n_s32(_s5n, 2);
int32x4_t _t1p2 = vaddq_s32(_s1, _s2);
int32x2_t _t1p2n = vadd_s32(_s1n, _s2n);
int32x4_t _t3p4 = vaddq_s32(_s3, _s4);
int32x2_t _t3p4n = vadd_s32(_s3n, _s4n);
int32x4_t _t1s2 = vsubq_s32(_s1, _s2);
int32x2_t _t1s2n = vsub_s32(_s1n, _s2n);
int32x4_t _t3s4 = vsubq_s32(_s3, _s4);
int32x2_t _t3s4n = vsub_s32(_s3n, _s4n);
_w0 = vaddq_s32(_s0, _t1p2);
_w0n = vadd_s32(_s0n, _t1p2n);
_w0 = vaddq_s32(_w0, _t3p4);
_w0n = vadd_s32(_w0n, _t3p4n);
_w0n = vmul_s32(_w0n, _tp0);
// _w2,_w2n
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1);
_t1p2n = vmul_s32(_t1p2n, _tp0);
_w3 = vaddq_s32(_s5x4, _t1s2);
_w3n = vadd_s32(_s5x4n, _t1s2n);
_w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1);
_w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1);
_w3n = vmul_s32(_w3n, _tp0);
// _w1, _w1n
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
_t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0);
_t1s2n = vmul_s32(_t1s2n, _tp0);
int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n);
int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n);
// transpose w to w_t
#if __aarch64__
int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2);
int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2);
int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3);
int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3);
int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
_d0 = vreinterpretq_s32_s64(_dt0);
_d1 = vreinterpretq_s32_s64(_dt1);
_d2 = vreinterpretq_s32_s64(_dt2);
_d3 = vreinterpretq_s32_s64(_dt3);
_d4 = vtrn1q_s32(_w02n, _w13n);
_d5 = vtrn2q_s32(_w02n, _w13n);
#else
asm volatile(
"vtrn.32 %q[_w0], %q[_w1] \n"
"vtrn.32 %q[_w2], %q[_w3] \n"
"vswp %f[_w0], %e[_w2] \n"
"vswp %f[_w1], %e[_w3] \n"
"vtrn.32 %q[_w02n], %q[_w13n] \n"
: [_w0] "+w"(_w0),
[_w1] "+w"(_t1s2),
[_w2] "+w"(_t1p2),
[_w3] "+w"(_w3),
[_w02n] "+w"(_w02n),
[_w13n] "+w"(_w13n)
:
: "cc", "memory");
_d0 = _w0;
_d1 = _t1s2;
_d2 = _t1p2;
_d3 = _w3;
_d4 = _w02n;
_d5 = _w13n;
#endif
// Y = A_T * w_t
_t1p2 = vaddq_s32(_d1, _d2);
_t3p4 = vaddq_s32(_d3, _d4);
_t1s2 = vsubq_s32(_d1, _d2);
_t3s4 = vsubq_s32(_d3, _d4);
_o0 = vaddq_s32(_d0, _t1p2);
_o0 = vaddq_s32(_o0, _t3p4);
// _o2
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_o3 = vaddq_s32(_d5, _t1s2);
_o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1);
// _o1
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
// save to top blob tm
float32x4_t _scale0 = vdupq_n_f32(scale0);
float32x4_t _out0_f32 = vdupq_n_f32(bias0);
float32x4_t _out1_f32 = vdupq_n_f32(bias0);
float32x4_t _out2_f32 = vdupq_n_f32(bias0);
float32x4_t _out3_f32 = vdupq_n_f32(bias0);
_out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0);
_out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_t1s2), _scale0);
_out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_t1p2), _scale0);
_out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0);
vst1q_f32(outRow0, _out0_f32);
vst1q_f32(outRow1, _out1_f32);
vst1q_f32(outRow2, _out2_f32);
vst1q_f32(outRow3, _out3_f32);
#else
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 5; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n];
}
for (int n = 5; n < 6; n++)
{
w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]);
w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]);
w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]);
w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]);
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = (float)o0[n] * scale0 + bias0;
outRow1[n] = (float)o1[n] * scale0 + bias0;
outRow2[n] = (float)o2[n] * scale0 + bias0;
outRow3[n] = (float)o3[n] * scale0 + bias0;
}
#endif // __ARM_NEON
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    // Repack the raw 3x3 int8 weights for the stride-2 NEON kernel:
    // output channels are processed 8 at a time, so the 8 weights that share
    // the same (input-channel, tap) position are interleaved contiguously.
    // Destination layout: outch/8 interleaved channels followed by one plain
    // channel per leftover output channel.
    kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8, (size_t)1u);

    const signed char* weights = _kernel;

    int p = 0;
    // Main path: pack groups of 8 output channels.
    for (; p + 7 < outch; p += 8)
    {
        // One read cursor per output channel in this group.
        const signed char* src[8];
        for (int j = 0; j < 8; j++)
        {
            src[j] = weights + (p + j) * inch * 9;
        }

        signed char* dst = kernel_tm.channel(p / 8);

        for (int q = 0; q < inch; q++)
        {
            // Interleave: for each of the 9 taps, emit the 8 channels' weights.
            for (int k = 0; k < 9; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    dst[j] = src[j][k];
                }
                dst += 8;
            }
            // Advance every cursor to the next input channel's 3x3 block.
            for (int j = 0; j < 8; j++)
            {
                src[j] += 9;
            }
        }
    }
    // Tail path: leftover output channels are copied through unchanged,
    // one destination channel each (index p/8 + p%8 lands past the packed
    // channels since p/8 == outch/8 here).
    for (; p < outch; p++)
    {
        const signed char* k0 = weights + p * inch * 9;
        signed char* dst = kernel_tm.channel(p / 8 + p % 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
            {
                dst[k] = k0[k];
            }
            dst += 9;
            k0 += 9;
        }
    }
}
static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p + 0);
Mat out1 = top_blob.channel(p + 1);
Mat out2 = top_blob.channel(p + 2);
Mat out3 = top_blob.channel(p + 3);
Mat out4 = top_blob.channel(p + 4);
Mat out5 = top_blob.channel(p + 5);
Mat out6 = top_blob.channel(p + 6);
Mat out7 = top_blob.channel(p + 7);
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
out4.fill(0);
out5.fill(0);
out6.fill(0);
out7.fill(0);
const signed char* ktmp = _kernel.channel(p / 8);
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
int* outptr4 = out4;
int* outptr5 = out5;
int* outptr6 = out6;
int* outptr7 = out7;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%9], #16 \n" //r0-r2
"ld2 {v5.8b, v6.8b}, [%9] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n" //out0
"ld1 {v10.4s, v11.4s}, [%2] \n" //out1
"ld1 {v12.4s, v13.4s}, [%3] \n" //out2
"ld1 {v14.4s, v15.4s}, [%4] \n" //out3
"ld1 {v16.4s, v17.4s}, [%5] \n" //out4
"ld1 {v18.4s, v19.4s}, [%6] \n" //out5
"ld1 {v20.4s, v21.4s}, [%7] \n" //out6
"ld1 {v22.4s, v23.4s}, [%8] \n" //out7
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k00-k70)
"sshll v1.8h, v1.8b, #0 \n" //(k01-k71)
"sshll v2.8h, v2.8b, #0 \n" //(k02-k72)
"sshll v3.8h, v3.8b, #0 \n" // r0
"sshll v4.8h, v4.8b, #0 \n" // r1
"sshll v7.8h, v7.8b, #0 \n" // r2
// r0
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r00-r07)*k00
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r00-r07)*k10
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r00-r07)*k20
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r00-r07)*k30
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r00-r07)*k40
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r00-r07)*k50
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r00-r07)*k60
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r00-r07)*k70
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r1
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r10-r17)*k01
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r10-r17)*k11
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r10-r17)*k21
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r10-r17)*k31
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r10-r17)*k41
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r10-r17)*k51
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r10-r17)*k61
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r10-r17)*k71
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r2
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r20-r27)*k02
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r20-r27)*k12
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r20-r27)*k22
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r20-r27)*k32
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r20-r27)*k42
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r20-r27)*k52
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r20-r27)*k62
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r20-r27)*k72
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%10], #16 \n" //r3-r5
"ld2 {v5.8b, v6.8b}, [%10] \n"
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k03-k73)
"sshll v1.8h, v1.8b, #0 \n" //(k04-k74)
"sshll v2.8h, v2.8b, #0 \n" //(k05-k75)
"sshll v3.8h, v3.8b, #0 \n" // r3
"sshll v4.8h, v4.8b, #0 \n" // r4
"sshll v7.8h, v7.8b, #0 \n" // r5
// r3
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r30-r37)*k03
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r30-r37)*k13
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r30-r37)*k23
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r30-r37)*k33
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r30-r37)*k43
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r30-r37)*k53
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r30-r37)*k63
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r30-r37)*k73
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r4
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r40-r47)*k04
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r40-r47)*k14
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r40-r47)*k24
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r40-r47)*k34
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r40-r47)*k44
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r40-r47)*k54
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r40-r47)*k64
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r40-r47)*k74
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r5
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r50-r57)*k05
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r50-r57)*k15
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r50-r57)*k25
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r50-r57)*k35
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r50-r57)*k45
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r50-r57)*k55
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r50-r57)*k65
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r50-r57)*k75
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%11], #16 \n" //r6-r8
"ld2 {v5.8b, v6.8b}, [%11] \n"
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k06-k76)
"sshll v1.8h, v1.8b, #0 \n" //(k07-k77)
"sshll v2.8h, v2.8b, #0 \n" //(k08-k78)
"sshll v3.8h, v3.8b, #0 \n" // r6
"sshll v4.8h, v4.8b, #0 \n" // r7
"sshll v7.8h, v7.8b, #0 \n" // r8
// r6
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r60-r67)*k06
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r60-r67)*k16
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r60-r67)*k26
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r60-r67)*k36
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r60-r67)*k46
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r60-r67)*k56
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r60-r67)*k66
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r60-r67)*k76
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r7
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r70-r77)*k07
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r70-r77)*k17
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r70-r77)*k27
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r70-r77)*k37
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r70-r77)*k47
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r70-r77)*k57
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r70-r77)*k67
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r70-r77)*k77
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r8
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r80-r87)*k08
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r80-r87)*k18
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r80-r87)*k28
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r80-r87)*k38
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r80-r87)*k48
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r80-r87)*k58
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r80-r87)*k68
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r80-r87)*k78
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
"st1 {v16.4s, v17.4s}, [%5], #32 \n"
"st1 {v18.4s, v19.4s}, [%6], #32 \n"
"st1 {v20.4s, v21.4s}, [%7], #32 \n"
"st1 {v22.4s, v23.4s}, [%8], #32 \n"
"subs %w0, %w0, #1 \n"
"sub %12, %12, #72 \n" // reset ktmp
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
#else // __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #128] \n"
"vld1.s32 {d16-d17}, [%1] \n" // out0
"pld [%2, #128] \n"
"vld1.s32 {d18-d19}, [%2] \n" // out1
"pld [%3, #128] \n"
"vld1.s32 {d20-d21}, [%3] \n" // out2
"pld [%4, #128] \n"
"vld1.s32 {d22-d23}, [%4] \n" // out3
// r0
"pld [%9, #64] \n"
"vld2.s8 {d8-d9}, [%9] \n" // d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015)
"add %9, #8 \n"
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k00-k70) d1(k01-k71) d2(k02-k72)
"pld [%5, #128] \n"
"vld1.s32 {d24-d25}, [%5] \n" // out4
"pld [%6, #128] \n"
"vld1.s32 {d26-d27}, [%6] \n" // out5
"vmovl.s8 q2, d2 \n" // q2(k02-k72)
"vmovl.s8 q1, d1 \n" // q1(k01-k71)
"vmovl.s8 q0, d0 \n" // q0(k00-k70)
"vext.s8 d12, d8, d8, #1 \n" // d12(a02 a04 a06 a08 x x x x)
"pld [%7, #128] \n"
"vld1.s32 {d28-d29}, [%7] \n" // out6
"vmovl.s8 q5, d9 \n" // q5(a01 a03 a05 a07 a09 a011 a013 a015) d11
"vmovl.s8 q4, d8 \n" // q4(a00 a02 a04 a06 a08 a010 a012 a014) d9
"vmovl.s8 q6, d12 \n" // q6(a02 a04 a06 a08 a010 a012 a014 a016) d13
"pld [%8, #128] \n"
"vld1.s32 {d30-d31}, [%8] \n" // out7
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a00 a02 a04 a06) * k00
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a00 a02 a04 a06) * k10
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a00 a02 a04 a06) * k20
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a00 a02 a04 a06) * k30
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a00 a02 a04 a06) * k40
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a00 a02 a04 a06) * k50
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a00 a02 a04 a06) * k60
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a00 a02 a04 a06) * k70
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a01-a07) * k01
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a01-a07) * k11
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a01-a07) * k21
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a01-a07) * k31
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a01-a07) * k41
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a01-a07) * k51
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a01-a07) * k61
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a01-a07) * k71
"pld [%10, #64] \n"
"vld2.s8 {d8-d9}, [%10] \n" // d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115)
"add %10, #8 \n"
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a02-a08) * k02
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a02-a08) * k12
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a02-a08) * k22
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a02-a08) * k32
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k03-k73) d1(k04-k74) d2(k05-k75)
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a02-a08) * k42
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a02-a08) * k52
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a02-a08) * k62
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a02-a08) * k72
// r1
"vext.s8 d12, d8, d8, #1 \n" // d12(a12 a14 a16 a18 x x x x)
"vmovl.s8 q2, d2 \n" // q2(k05-k75)
"vmovl.s8 q1, d1 \n" // q1(k04-k74)
"vmovl.s8 q0, d0 \n" // q0(k03-k73)
"vmovl.s8 q5, d9 \n" // q5(a11-a115)
"vmovl.s8 q4, d8 \n" // q4(a10-a114)
"vmovl.s8 q6, d12 \n" // q6(a12-a116)
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a10-a16) * k03
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a10-a16) * k13
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a10-a16) * k23
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a10-a16) * k33
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a10-a16) * k43
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a10-a16) * k53
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a10-a16) * k63
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a10-a16) * k73
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a11-a17) * k04
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a11-a17) * k14
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a11-a17) * k24
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a11-a17) * k34
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a11-a17) * k44
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a11-a17) * k54
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a11-a17) * k64
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a11-a17) * k74
"pld [%11, #64] \n"
"vld2.s8 {d8-d9}, [%11] \n" // d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215)
"add %11, #8 \n"
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a12-a18) * k05
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a12-a18) * k15
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a12-a18) * k25
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a12-a18) * k35
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k06-k76) d1(k07-k77) d2(k08-k78)
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a12-a18) * k45
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a12-a18) * k55
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a12-a18) * k65
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a12-a18) * k75
// r2
"vext.s8 d12, d8, d8, #1 \n" // d12(a22 a24 a26 a28 x x x x)
"vmovl.s8 q2, d2 \n" // q2(k08-k78)
"vmovl.s8 q1, d1 \n" // q1(k07-k77)
"vmovl.s8 q0, d0 \n" // q0(k06-k76)
"vmovl.s8 q5, d9 \n" // q5(a21-a215)
"vmovl.s8 q4, d8 \n" // q4(a20-a214)
"vmovl.s8 q6, d12 \n" // q6(a22-a216)
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a20-a26) * k06
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a20-a26) * k16
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a20-a26) * k26
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a20-a26) * k36
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a20-a26) * k46
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a20-a26) * k56
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a20-a26) * k66
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a20-a26) * k76
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a21-a27) * k07
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a21-a27) * k17
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a21-a27) * k27
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a21-a27) * k37
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a21-a27) * k47
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a21-a27) * k57
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a21-a27) * k67
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a21-a27) * k77
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a22-a28) * k08
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a22-a28) * k18
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a22-a28) * k28
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a22-a28) * k38
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a22-a28) * k48
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a22-a28) * k58
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a22-a28) * k68
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a22-a28) * k78
// save s32 to memory
"sub %12, %12, #72 \n"
"vst1.s32 {d16-d17}, [%1]! \n" // out0
"vst1.s32 {d18-d19}, [%2]! \n" // out1
"vst1.s32 {d20-d21}, [%3]! \n" // out2
"vst1.s32 {d22-d23}, [%4]! \n" // out3
"subs %0, #1 \n"
"vst1.s32 {d24-d25}, [%5]! \n" // out4
"vst1.s32 {d26-d27}, [%6]! \n" // out5
"vst1.s32 {d28-d29}, [%7]! \n" // out6
"vst1.s32 {d30-d31}, [%8]! \n" // out7
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
#if __aarch64__
int8x8_t _r0_s8 = vld1_s8(r0); // (a00 a01 a02 ....)
int8x8_t _r1_s8 = vld1_s8(r1); // (a10 a11 a12 ....)
int8x8_t _r2_s8 = vld1_s8(r2); // (a20 a21 a22 ....)
int16x8_t _r0 = vmovl_s8(_r0_s8);
int16x8_t _r1 = vmovl_s8(_r1_s8);
int16x8_t _r2 = vmovl_s8(_r2_s8);
int32x4_t _sum03 = {};
int32x4_t _sum47 = {};
_sum03 = vld1q_lane_s32(outptr0, _sum03, 0); // out0
_sum03 = vld1q_lane_s32(outptr1, _sum03, 1); // out1
_sum03 = vld1q_lane_s32(outptr2, _sum03, 2); // out2
_sum03 = vld1q_lane_s32(outptr3, _sum03, 3); // out3
_sum47 = vld1q_lane_s32(outptr4, _sum47, 0); // out4
_sum47 = vld1q_lane_s32(outptr5, _sum47, 1); // out5
_sum47 = vld1q_lane_s32(outptr6, _sum47, 2); // out6
_sum47 = vld1q_lane_s32(outptr7, _sum47, 3); // out7
// k0 - k2
int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70)
int8x8_t _k1_8 = vld1_s8(ktmp + 8); //(k01-k71)
int8x8_t _k2_8 = vld1_s8(ktmp + 16); //(k02-k72)
int16x8_t _k0 = vmovl_s8(_k0_8);
int16x8_t _k1 = vmovl_s8(_k1_8);
int16x8_t _k2 = vmovl_s8(_k2_8);
int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0);
int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0);
int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1);
int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2);
// k3 - k5
_k0_8 = vld1_s8(ktmp + 24); //(k03-k73)
_k1_8 = vld1_s8(ktmp + 32); //(k04-k74)
_k2_8 = vld1_s8(ktmp + 40); //(k05-k75)
_k0 = vmovl_s8(_k0_8);
_k1 = vmovl_s8(_k1_8);
_k2 = vmovl_s8(_k2_8);
_sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0);
_sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0);
_sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1);
_sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2);
// k6 - k8
_k0_8 = vld1_s8(ktmp + 48); //(k06-k76)
_k1_8 = vld1_s8(ktmp + 56); //(k07-k77)
_k2_8 = vld1_s8(ktmp + 64); //(k08-k78)
_k0 = vmovl_s8(_k0_8);
_k1 = vmovl_s8(_k1_8);
_k2 = vmovl_s8(_k2_8);
_sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0);
_sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0);
_sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1);
_sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2);
_sum0 = vaddq_s32(_sum0, _sum1);
_sum0n = vaddq_s32(_sum0n, _sum1n);
_sum03 = vaddq_s32(_sum03, _sum0);
_sum47 = vaddq_s32(_sum47, _sum0n);
vst1q_lane_s32(outptr0, _sum03, 0);
vst1q_lane_s32(outptr1, _sum03, 1);
vst1q_lane_s32(outptr2, _sum03, 2);
vst1q_lane_s32(outptr3, _sum03, 3);
vst1q_lane_s32(outptr4, _sum47, 0);
vst1q_lane_s32(outptr5, _sum47, 1);
vst1q_lane_s32(outptr6, _sum47, 2);
vst1q_lane_s32(outptr7, _sum47, 3);
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#else // __aarch64__
asm volatile(
"pld [%8, #64] \n"
"vld1.s8 {d0}, [%8] \n" // d0(a00 a01 a02 ....)
"pld [%9, #64] \n"
"vld1.s8 {d2}, [%9] \n" // d2(a10 a11 a12 ....)
"pld [%10, #64] \n"
"vld1.s8 {d4}, [%10] \n" // d4(a20 a21 a22 ....)
"pld [%11, #64] \n"
"vld1.s8 {d6-d8}, [%11]! \n" // d6(k00-k70) d7(k01-k71) d8(k02-k72)
"vmovl.s8 q0, d0 \n" // d0(a00 a01 a02 x)
"vmovl.s8 q1, d2 \n" // d2(a10 a11 a12 x)
"vmovl.s8 q2, d4 \n" // d4(a20 a21 a22 x)
"vmovl.s8 q5, d8 \n" // d10(k02-k32) d11(k42-k72)
"vmovl.s8 q4, d7 \n" // d8(k01-k31) d9(k41-k71)
"vmovl.s8 q3, d6 \n" // d6(k00-k30) d7(k40-k70)
"vld1.s32 {d20[0]}, [%0] \n" // out0 q10
"vld1.s32 {d20[1]}, [%1] \n" // out1
"vld1.s32 {d21[0]}, [%2] \n" // out2
"vld1.s32 {d21[1]}, [%3] \n" // out3
"pld [%11, #64] \n"
"vld1.s8 {d24-d26}, [%11]! \n"
"vmovl.s8 q14, d26 \n" // d28(k05-k35) d29(k45-k75)
"vmovl.s8 q13, d25 \n" // d26(k04-k34) d27(k44-k74)
"vmovl.s8 q12, d24 \n" // d24(k03-k33) d25(k43-k73)
"vld1.s32 {d22[0]}, [%4] \n" // out4 q11
"vld1.s32 {d22[1]}, [%5] \n" // out5
"vld1.s32 {d23[0]}, [%6] \n" // out6
"vld1.s32 {d23[1]}, [%7] \n" // out7
"vmull.s16 q6, d6, d0[0] \n" // a00 x (k00-k30)
"vmull.s16 q7, d7, d0[0] \n" // a00 x (k40-k70)
"vmull.s16 q8, d8, d0[1] \n" // a01 x (k01-k31)
"vmull.s16 q9, d9, d0[1] \n" // a01 x (k41-k71)
"vmlal.s16 q10, d10, d0[2] \n" // a02 x (k02-k32)
"vmlal.s16 q11, d11, d0[2] \n" // a02 x (k42-k72)
"pld [%11, #64] \n"
"vld1.s8 {d6-d8}, [%11]! \n"
"vmovl.s8 q5, d8 \n" // d10(k08-k38) d11(k48-k78)
"vmovl.s8 q4, d7 \n" // d8(k07-k37) d9(k47-k77)
"vmovl.s8 q3, d6 \n" // d6(k06-k36) d7(k46-k76)
"vmlal.s16 q6, d24, d2[0] \n" // a10 x (k03-k33)
"vmlal.s16 q7, d25, d2[0] \n" // a10 x (k43-k73)
"vmlal.s16 q8, d26, d2[1] \n" // a11 x (k04-k34)
"vmlal.s16 q9, d27, d2[1] \n" // a11 x (k44-k74)
"vmlal.s16 q10, d28, d2[2] \n" // a12 x (k05-k35)
"vmlal.s16 q11, d29, d2[2] \n" // a12 x (k45-k75)
"vmlal.s16 q6, d6, d4[0] \n" // a20 x (k06-k36)
"vmlal.s16 q7, d7, d4[0] \n" // a20 x (k46-k76)
"vmlal.s16 q8, d8, d4[1] \n" // a21 x (k07-k37)
"vmlal.s16 q9, d9, d4[1] \n" // a21 x (k47-k77)
"vmlal.s16 q10, d10, d4[2] \n" // a22 x (k08-k38)
"vmlal.s16 q11, d11, d4[2] \n" // a22 x (k48-k78)
"vadd.s32 q8, q8, q6 \n"
"vadd.s32 q9, q9, q7 \n"
"sub %11, %11, #72 \n"
"vadd.s32 q10, q10, q8 \n"
"vadd.s32 q11, q11, q9 \n"
"vst1.s32 {d20[0]}, [%0]! \n" // out0
"vst1.s32 {d20[1]}, [%1]! \n" // out1
"vst1.s32 {d21[0]}, [%2]! \n" // out2
"vst1.s32 {d21[1]}, [%3]! \n" // out3
"vst1.s32 {d22[0]}, [%4]! \n" // out4
"vst1.s32 {d22[1]}, [%5]! \n" // out5
"vst1.s32 {d23[0]}, [%6]! \n" // out6
"vst1.s32 {d23[1]}, [%7]! \n" // out7
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else // __ARM_NEON
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
sum0 += (int)r0[0] * ktmp[0];
sum1 += (int)r0[0] * ktmp[1];
sum2 += (int)r0[0] * ktmp[2];
sum3 += (int)r0[0] * ktmp[3];
sum4 += (int)r0[0] * ktmp[4];
sum5 += (int)r0[0] * ktmp[5];
sum6 += (int)r0[0] * ktmp[6];
sum7 += (int)r0[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r0[1] * ktmp[0];
sum1 += (int)r0[1] * ktmp[1];
sum2 += (int)r0[1] * ktmp[2];
sum3 += (int)r0[1] * ktmp[3];
sum4 += (int)r0[1] * ktmp[4];
sum5 += (int)r0[1] * ktmp[5];
sum6 += (int)r0[1] * ktmp[6];
sum7 += (int)r0[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r0[2] * ktmp[0];
sum1 += (int)r0[2] * ktmp[1];
sum2 += (int)r0[2] * ktmp[2];
sum3 += (int)r0[2] * ktmp[3];
sum4 += (int)r0[2] * ktmp[4];
sum5 += (int)r0[2] * ktmp[5];
sum6 += (int)r0[2] * ktmp[6];
sum7 += (int)r0[2] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[0] * ktmp[0];
sum1 += (int)r1[0] * ktmp[1];
sum2 += (int)r1[0] * ktmp[2];
sum3 += (int)r1[0] * ktmp[3];
sum4 += (int)r1[0] * ktmp[4];
sum5 += (int)r1[0] * ktmp[5];
sum6 += (int)r1[0] * ktmp[6];
sum7 += (int)r1[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[1] * ktmp[0];
sum1 += (int)r1[1] * ktmp[1];
sum2 += (int)r1[1] * ktmp[2];
sum3 += (int)r1[1] * ktmp[3];
sum4 += (int)r1[1] * ktmp[4];
sum5 += (int)r1[1] * ktmp[5];
sum6 += (int)r1[1] * ktmp[6];
sum7 += (int)r1[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[2] * ktmp[0];
sum1 += (int)r1[2] * ktmp[1];
sum2 += (int)r1[2] * ktmp[2];
sum3 += (int)r1[2] * ktmp[3];
sum4 += (int)r1[2] * ktmp[4];
sum5 += (int)r1[2] * ktmp[5];
sum6 += (int)r1[2] * ktmp[6];
sum7 += (int)r1[2] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[0] * ktmp[0];
sum1 += (int)r2[0] * ktmp[1];
sum2 += (int)r2[0] * ktmp[2];
sum3 += (int)r2[0] * ktmp[3];
sum4 += (int)r2[0] * ktmp[4];
sum5 += (int)r2[0] * ktmp[5];
sum6 += (int)r2[0] * ktmp[6];
sum7 += (int)r2[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[1] * ktmp[0];
sum1 += (int)r2[1] * ktmp[1];
sum2 += (int)r2[1] * ktmp[2];
sum3 += (int)r2[1] * ktmp[3];
sum4 += (int)r2[1] * ktmp[4];
sum5 += (int)r2[1] * ktmp[5];
sum6 += (int)r2[1] * ktmp[6];
sum7 += (int)r2[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[2] * ktmp[0];
sum1 += (int)r2[2] * ktmp[1];
sum2 += (int)r2[2] * ktmp[2];
sum3 += (int)r2[2] * ktmp[3];
sum4 += (int)r2[2] * ktmp[4];
sum5 += (int)r2[2] * ktmp[5];
sum6 += (int)r2[2] * ktmp[6];
sum7 += (int)r2[2] * ktmp[7];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
ktmp -= 8 * 9;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 8 * 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out = top_blob.channel(p);
out.fill(0);
const signed char* ktmp = _kernel.channel(p / 8 + p % 8);
for (int q = 0; q < inch; q++)
{
int* outptr = out;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v0.8b, v1.8b}, [%5] \n" //ktmp
"ld2 {v2.8b, v3.8b}, [%2], #16 \n" //r0-r2
"ld2 {v4.8b, v5.8b}, [%2] \n"
"ld2 {v6.8b, v7.8b}, [%3], #16 \n" //r3-r5
"ld2 {v8.8b, v9.8b}, [%3] \n"
"ld2 {v10.8b, v11.8b}, [%4], #16 \n" //r6-r8
"ld2 {v12.8b, v13.8b}, [%4] \n"
"ld1 {v14.4s, v15.4s}, [%1] \n" //out0
"ext v4.8b, v2.8b, v4.8b, #1 \n"
"ext v8.8b, v6.8b, v8.8b, #1 \n"
"ext v12.8b, v10.8b, v12.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k0-k7)
"sshll v1.8h, v1.8b, #0 \n" //(k8)
"sshll v2.8h, v2.8b, #0 \n" // r0
"sshll v3.8h, v3.8b, #0 \n" // r1
"sshll v4.8h, v4.8b, #0 \n" // r2
"sshll v6.8h, v6.8b, #0 \n" // r3
"sshll v7.8h, v7.8b, #0 \n" // r4
"sshll v8.8h, v8.8b, #0 \n" // r5
"sshll v10.8h, v10.8b, #0 \n" // r6
"sshll v11.8h, v11.8b, #0 \n" // r7
"sshll v12.8h, v12.8b, #0 \n" // r8
// r0
"smull v16.4s, v2.4h, v0.h[0] \n" // out = r0*k0
"smull2 v17.4s, v2.8h, v0.h[0] \n"
"smull v18.4s, v3.4h, v0.h[1] \n" // outn = r1*k1
"smull2 v19.4s, v3.8h, v0.h[1] \n"
"smlal v16.4s, v4.4h, v0.h[2] \n" // out = r2*k2
"smlal2 v17.4s, v4.8h, v0.h[2] \n"
"smlal v18.4s, v6.4h, v0.h[3] \n" // outn = r3*k3
"smlal2 v19.4s, v6.8h, v0.h[3] \n"
"smlal v16.4s, v7.4h, v0.h[4] \n" // out = r4*k4
"smlal2 v17.4s, v7.8h, v0.h[4] \n"
"smlal v18.4s, v8.4h, v0.h[5] \n" // outn = r5*k5
"smlal2 v19.4s, v8.8h, v0.h[5] \n"
"smlal v16.4s, v10.4h, v0.h[6] \n" // out = r6*k6
"smlal2 v17.4s, v10.8h, v0.h[6] \n"
"smlal v18.4s, v11.4h, v0.h[7] \n" // outn = r7*k7
"smlal2 v19.4s, v11.8h, v0.h[7] \n"
"smlal v16.4s, v12.4h, v1.h[0] \n" // out = r8*k8
"smlal2 v17.4s, v12.8h, v1.h[0] \n"
"add v8.4s, v16.4s, v18.4s \n"
"add v9.4s, v17.4s, v19.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(ktmp) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#else
if (nn > 0)
{
asm volatile(
"vld1.s8 {d0-d1}, [%5] \n" // d0(k0 - k7) d1(k8 ...)
"vmovl.s8 q1, d1 \n" // d2(k8 ...)
"vmovl.s8 q0, d0 \n" // d0(k0 - k3) d1(k4 - k7)
"0: \n"
"pld [%2, #192] \n"
"vld2.s8 {d4-d5}, [%2]! \n" // r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015)
"vld2.s8 {d8-d9}, [%2] \n" // d8(a016 ....)
"vld2.s8 {d10-d11}, [%3]! \n" // r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115)
"vld2.s8 {d14-d15}, [%3] \n" // d14(a116 ....)
"vld2.s8 {d16-d17}, [%4]! \n" // r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215)
"vld2.s8 {d20-d21}, [%4] \n" // d20(a216 ....)
"vld1.s32 {d22-d25}, [%1] \n" // q11(out0 - out3) q12(out4 - out7)
"vext.s8 d8, d4, d8, #1 \n" // d8(a02 a04 ... a016)
"vext.s8 d14, d10, d14, #1 \n" // d14(a12 a14 ... a116)
"vext.s8 d20, d16, d20, #1 \n" // d20(a22 a24 ... a216)
"vmovl.s8 q3, d5 \n" // q3(a01 a03 ... a015)
"vmovl.s8 q2, d4 \n" // q2(a00 a02 ... a014)
"vmovl.s8 q4, d8 \n" // q4(a02 a04 ... a016)
"vmovl.s8 q6, d11 \n" // q6(a11 a13 ... a115)
"vmovl.s8 q5, d10 \n" // q5(a10 a12 ... a114)
"vmovl.s8 q7, d14 \n" // q7(a12 a14 ... a116)
"vmovl.s8 q9, d17 \n" // q9(a21 a23 ... a215)
"vmovl.s8 q8, d16 \n" // q8(a20 a22 ... a214)
"vmovl.s8 q10, d20 \n" // q10(a22 a24 ... a216)
"vmlal.s16 q11, d4, d0[0] \n" // k0
"vmlal.s16 q12, d5, d0[0] \n"
"vmull.s16 q13, d6, d0[1] \n" // k1
"vmull.s16 q14, d7, d0[1] \n"
"vmlal.s16 q11, d8, d0[2] \n" // k2
"vmlal.s16 q12, d9, d0[2] \n"
"vmlal.s16 q13, d12, d1[0] \n" // k4
"vmlal.s16 q14, d13, d1[0] \n"
"vmlal.s16 q11, d10, d0[3] \n" // k3
"vmlal.s16 q12, d11, d0[3] \n"
"vmlal.s16 q13, d14, d1[1] \n" // k5
"vmlal.s16 q14, d15, d1[1] \n"
"vmlal.s16 q11, d16, d1[2] \n" // k6
"vmlal.s16 q12, d17, d1[2] \n"
"vmlal.s16 q13, d18, d1[3] \n" // k7
"vmlal.s16 q14, d19, d1[3] \n"
"vmlal.s16 q11, d20, d2[0] \n" // k8
"vmlal.s16 q12, d21, d2[0] \n"
"vadd.s32 q11, q11, q13 \n"
"vadd.s32 q12, q12, q14 \n"
"vst1.32 {d22-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(ktmp) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
if (remain > 0)
{
#if __ARM_NEON
int8x8_t _k01234567s8 = vld1_s8(ktmp);
int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp + 8);
int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3);
int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6);
int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8);
int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8);
int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8);
#endif
for (; remain > 0; remain--)
{
#if __ARM_NEON
int8x8_t _r00s8 = vld1_s8(r0);
int8x8_t _r10s8 = vld1_s8(r1);
int8x8_t _r20s8 = vld1_s8(r2);
int16x8_t _r00s16 = vmovl_s8(_r00s8);
int16x8_t _r10s16 = vmovl_s8(_r10s8);
int16x8_t _r20s16 = vmovl_s8(_r20s8);
int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16));
_sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16));
_sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16));
_sum = vsetq_lane_s32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_s32(_sum);
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
*outptr = vget_lane_s32(_ss, 0);
#endif // __aarch64__
#else
int sum = 0;
sum += (int)r0[0] * ktmp[0];
sum += (int)r0[1] * ktmp[1];
sum += (int)r0[2] * ktmp[2];
sum += (int)r1[0] * ktmp[3];
sum += (int)r1[1] * ktmp[4];
sum += (int)r1[2] * ktmp[5];
sum += (int)r2[0] * ktmp[6];
sum += (int)r2[1] * ktmp[7];
sum += (int)r2[2] * ktmp[8];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 9;
}
}
}
|
GB_unop__identity_fc32_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_bool
// op(A') function: GB_unop_tran__identity_fc32_bool
// C type: GxB_FC32_t
// A type: bool
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
// Input (A) entry type consumed by the template code.
#define GB_ATYPE \
bool
// Output (C) entry type produced by the template code.
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// Access the output array at position p.
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: cast each bool entry of Ax to a
// single-precision complex value (imaginary part zero) and store it in Cx.
// Cx and Ax may be aliased; the loop only reads Ax[k] and writes Cx[k].
GrB_Info GB_unop_apply__identity_fc32_bool
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // cij = (GxB_FC32_t) aij
        Cx [k] = GxB_CMPLXF ((float) (Ax [k]), 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying the identity operator and casting
// bool -> GxB_FC32_t. The actual loops live in the shared template
// GB_unop_transpose.c, parameterized by the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_fc32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2: fill the output (phase 1, the counting pass, ran elsewhere)
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pf_fold.c | /*
 * partition function for single RNA secondary structures
*
* Simplified interfaces and backward compatibility
* wrappers
*
* Ivo L Hofacker + Ronny Lorenz
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/*
* ###########################################
* # deprecated functions below #
*###########################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int st_back = 0;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular);
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
int length,
int *index,
int turn);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL  *p,
                      int         length,
                      int         *index,
                      int         turn)
{
  /*
   * Mean base pair distance of the thermodynamic ensemble:
   * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) = \sum_{ij} p_ij (1 - p_ij).
   * Each (i,j) pair is counted once, so the sum is doubled at the end.
   * 'index' maps row i to its offset in the row-wise triangular array.
   */
  int     i, j;
  double  sum;

  sum = 0.;
  for (i = 1; i <= length; i++) {
    for (j = i + turn + 1; j <= length; j++) {
      FLT_OR_DBL pij = p[index[i] - j];
      sum += pij * (1 - pij);
    }
  }

  return 2 * sum;
}
/* Core of all deprecated pf_fold() entry points: build a fold compound for
 * 'sequence', apply optional constraints, store the compound in the global
 * backward-compatibility slot, and run the partition function computation.
 * Returns the ensemble free energy from vrna_pf(). */
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
vrna_fold_compound_t *vc;
vrna_md_t md;
vc = NULL;
/* we need vrna_exp_param_t datastructure to correctly init default hard constraints */
if (parameters)
md = parameters->model_details;
else
set_model_details(&md); /* get global default parameters */
md.circ = is_circular;
md.compute_bpp = calculate_bppm;
vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT);
/* prepare exp_params and set global pf_scale */
vc->exp_params = vrna_exp_params(&(vc->params->model_details));
vc->exp_params->pf_scale = pf_scale;
if (is_constrained && structure) {
/* interpret 'structure' as a dot-bracket constraint string */
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
/* replace any previously stored compound to avoid leaking it */
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
iindx = backward_compat_compound->iindx;
return vrna_pf(vc, structure);
}
/* Deprecated wrapper around vrna_stack_prob(): uses the globally stored
 * fold compound from the last pf_fold() call. Returns NULL (with a
 * warning) when no compound or no probability data is available. */
PUBLIC vrna_ep_t *
stackProb(double cutoff)
{
  vrna_fold_compound_t *vc = backward_compat_compound;

  if (!(vc && backward_compat)) {
    vrna_message_warning("stackProb: "
                         "run pf_fold() first!");
    return NULL;
  }

  if (!vc->exp_matrices->probs) {
    vrna_message_warning("stackProb: "
                         "probs == NULL!");
    return NULL;
  }

  return vrna_stack_prob(vc, cutoff);
}
/* Deprecated wrapper: compute the centroid structure from the global base
 * pair probability array 'pr' filled by pf_fold(); NULL with a warning if
 * pf_fold() has not been run yet. */
PUBLIC char *
centroid(int    length,
         double *dist)
{
  if (pr != NULL)
    return vrna_centroid_from_probs(length, dist, pr);

  vrna_message_warning("centroid: "
                       "pr == NULL. You need to call pf_fold() before centroid()");
  return NULL;
}
/**
 * Deprecated: mean base pair distance of the thermodynamic ensemble,
 * computed from the global probability array 'pr' filled by pf_fold().
 *
 * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) = \sum_{ij} p_ij (1 - p_ij)
 *
 * The summation is delegated to wrap_mean_bp_distance() so the formula
 * lives in a single place (it was previously duplicated here verbatim).
 */
PUBLIC double
mean_bp_dist(int length)
{
  int     *my_iindx;
  double  d = 0;

  if (pr == NULL) {
    vrna_message_warning("mean_bp_dist: "
                         "pr == NULL. You need to call pf_fold() before mean_bp_dist()");
    return d;
  }

  my_iindx  = vrna_idx_row_wise(length);
  d         = wrap_mean_bp_distance(pr, length, my_iindx, TURN);
  free(my_iindx);

  return d;
}
/* get the free energy of a subsequence from the q[] array */
/* Free energy (kcal/mol) of subsequence [i..j], derived from the partition
 * function array q[] of the last pf_fold() call. The pf_scale term removes
 * the per-nucleotide scaling factor applied during the computation. */
PUBLIC double
get_subseq_F(int i,
int j)
{
if (backward_compat_compound) {
if (backward_compat_compound->exp_matrices) {
if (backward_compat_compound->exp_matrices->q) {
int *my_iindx = backward_compat_compound->iindx;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q;
return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT /
1000.0;
}
}
}
vrna_message_warning("get_subseq_F: "
"call pf_fold() to fill q[] array before calling get_subseq_F()");
return 0.; /* only reached when the pf arrays have not been filled yet */
}
/*----------------------------------------------------------------------*/
/*
 * Boltzmann weight of a hairpin loop of size u closed by pair 'type' with
 * mismatch bases si1/sj1; the caller multiplies by scale[u+2].
 * Tabulated special motifs (tetra-, hexa-, tri-loops) override the generic
 * formula when found in the parameter tables.
 */
PUBLIC double
expHairpinEnergy(int        u,
                 int        type,
                 short      si1,
                 short      sj1,
                 const char *string)
{
  /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */
  vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
  double q, kT;
  kT = pf_params->kT; /* kT in cal/mol */
  if (u <= 30)
    q = pf_params->exphairpin[u];
  else
    /* logarithmic extrapolation for loops larger than 30 nt */
    q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT);
  if ((tetra_loop) && (u == 4)) {
    char tl[7] = {
      0
    }, *ts;
    /* tetraloop motifs are 6-character keys stored in 7-char slots */
    strncpy(tl, string, 6);
    if ((ts = strstr(pf_params->Tetraloops, tl)))
      return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7];
  }
  if ((tetra_loop) && (u == 6)) {
    char tl[9] = {
      0
    }, *ts;
    /* hexaloop motifs are 8-character keys stored in 9-char slots; copy 8
     * characters (previously only 6 were copied, truncating the lookup
     * key — inconsistent with the tl[9] buffer and the /9 stride below) */
    strncpy(tl, string, 8);
    if ((ts = strstr(pf_params->Hexaloops, tl)))
      return pf_params->exphex[(ts - pf_params->Hexaloops) / 9];
  }
  if (u == 3) {
    char tl[6] = {
      0
    }, *ts;
    /* triloop motifs are 5-character keys stored in 6-char slots */
    strncpy(tl, string, 5);
    if ((ts = strstr(pf_params->Triloops, tl)))
      return pf_params->exptri[(ts - pf_params->Triloops) / 6];
    /* non-tabulated triloop closed by a non-CG/GC pair: AU/GU penalty */
    if (type > 2)
      q *= pf_params->expTermAU;
  } else {
    /* no mismatches for tri-loops */
    q *= pf_params->expmismatchH[type][si1][sj1];
  }
  return q;
}
/*
 * Boltzmann weight of an interior loop with u1/u2 unpaired bases on either
 * side, closed by pairs 'type'/'type2' with mismatch bases si1,sj1,sp1,sq1;
 * the caller multiplies by scale[u1+u2+2].
 */
PUBLIC double
expLoopEnergy(int   u1,
              int   u2,
              int   type,
              int   type2,
              short si1,
              short sj1,
              short sp1,
              short sq1)
{
  /* compute Boltzmann weight of interior loop,
   * multiply by scale[u1+u2+2] for scaling */
  double            z = 0;
  int               no_close = 0;
  vrna_exp_param_t  *pf_params = backward_compat_compound->exp_params;

  /* suppress loops closed by a GU (3) or UG (4) pair when no_closingGU is
   * set; the original condition tested (type == 2), asymmetric with the
   * (type2 == 3) || (type2 == 4) test — fixed to (type == 3) */
  if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 3) || (type == 4)))
    no_close = 1;
  if ((u1 == 0) && (u2 == 0)) {
    /* stack */
    z = pf_params->expstack[type][type2];
  } else if (no_close == 0) {
    if ((u1 == 0) || (u2 == 0)) {
      /* bulge */
      int u;
      u = (u1 == 0) ? u2 : u1;
      z = pf_params->expbulge[u];
      if (u2 + u1 == 1) {
        /* a size-1 bulge still stacks */
        z *= pf_params->expstack[type][type2];
      } else {
        if (type > 2)
          z *= pf_params->expTermAU;
        if (type2 > 2)
          z *= pf_params->expTermAU;
      }
    } else {
      /* interior loop */
      if (u1 + u2 == 2) {
        /* size 2 is special */
        z = pf_params->expint11[type][type2][si1][sj1];
      } else if ((u1 == 1) && (u2 == 2)) {
        z = pf_params->expint21[type][type2][si1][sq1][sj1];
      } else if ((u1 == 2) && (u2 == 1)) {
        z = pf_params->expint21[type2][type][sq1][si1][sp1];
      } else if ((u1 == 2) && (u2 == 2)) {
        z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1];
      } else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) {
        /*2-3 is special*/
        z = pf_params->expinternal[5] *
            pf_params->expmismatch23I[type][si1][sj1] *
            pf_params->expmismatch23I[type2][sq1][sp1];
        z *= pf_params->expninio[2][1];
      } else if ((u1 == 1) || (u2 == 1)) {
        /*1-n is special*/
        z = pf_params->expinternal[u1 + u2] *
            pf_params->expmismatch1nI[type][si1][sj1] *
            pf_params->expmismatch1nI[type2][sq1][sp1];
        z *= pf_params->expninio[2][abs(u1 - u2)];
      } else {
        /* generic interior loop */
        z = pf_params->expinternal[u1 + u2] *
            pf_params->expmismatchI[type][si1][sj1] *
            pf_params->expmismatchI[type2][sq1][sp1];
        z *= pf_params->expninio[2][abs(u1 - u2)];
      }
    }
  }
  return z;
}
/* Deprecated no-op: allocation is handled by the fold compound nowadays */
PUBLIC void
init_pf_circ_fold(int length)
{
/* DO NOTHING */
}
/* Deprecated no-op: allocation is handled by the fold compound nowadays */
PUBLIC void
init_pf_fold(int length)
{
/* DO NOTHING */
}
/**
 *** Release the globally stored fold compound created by the last
 *** pf_fold() call and reset the backward-compatibility bookkeeping
 **/
PUBLIC void
free_pf_arrays(void)
{
  if (!(backward_compat_compound && backward_compat))
    return;

  vrna_fold_compound_free(backward_compat_compound);
  backward_compat_compound  = NULL;
  backward_compat           = 0;
  iindx                     = NULL;
}
/* Expose the base pair probability matrix of the last pf_fold() call,
 * or NULL if no probabilities have been computed yet. */
PUBLIC FLT_OR_DBL *
export_bppm(void)
{
  vrna_fold_compound_t *vc = backward_compat_compound;

  if (vc && vc->exp_matrices && vc->exp_matrices->probs)
    return vc->exp_matrices->probs;

  return NULL;
}
/*
* -------------------------------------------------------------------------
* make arrays used for pf_fold available to other routines
*/
/* Hand out pointers to the internal arrays of the last pf_fold() call.
 * Returns 1 on success, 0 if no partition function data exists; on
 * failure the output pointers are left untouched. */
PUBLIC int
get_pf_arrays(short       **S_p,
              short       **S1_p,
              char        **ptype_p,
              FLT_OR_DBL  **qb_p,
              FLT_OR_DBL  **qm_p,
              FLT_OR_DBL  **q1k_p,
              FLT_OR_DBL  **qln_p)
{
  vrna_fold_compound_t *vc = backward_compat_compound;

  if (!(vc && vc->exp_matrices && vc->exp_matrices->qb))
    return 0;

  *S_p      = vc->sequence_encoding2;
  *S1_p     = vc->sequence_encoding;
  *ptype_p  = vc->ptype_pf_compat;
  *qb_p     = vc->exp_matrices->qb;
  *qm_p     = vc->exp_matrices->qm;
  *q1k_p    = vc->exp_matrices->q1k;
  *qln_p    = vc->exp_matrices->qln;

  return 1;
}
/*-----------------------------------------------------------------*/
/* Deprecated: linear-RNA partition function using the global model settings;
 * returns the ensemble free energy */
PUBLIC float
pf_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0);
}
/* Deprecated: same as pf_fold() but treats the RNA as circular */
PUBLIC float
pf_circ_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1);
}
/* Deprecated: partition function with explicit parameter set and flags
 * instead of the global model settings */
PUBLIC float
pf_fold_par(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular);
}
/* Deprecated: stochastically sample one secondary structure for the full
 * sequence from the Boltzmann ensemble of the last pf_fold() call.
 * NOTE(review): assumes pf_fold() ran before — a NULL
 * backward_compat_compound is not guarded against here. */
PUBLIC char *
pbacktrack(char *seq)
{
int n = (int)strlen(seq);
return vrna_pbacktrack5(backward_compat_compound, n);
}
/* Deprecated: sample a structure for the first 'length' nucleotides only */
PUBLIC char *
pbacktrack5(char *seq,
int length)
{
/* the seq parameter must not differ from the one stored globally anyway, so we just ignore it */
return vrna_pbacktrack5(backward_compat_compound, length);
}
/* Deprecated: sample one structure from the circular-RNA ensemble of the
 * last pf_circ_fold() call; returns NULL when no circular partition
 * function data is available. The seq parameter is ignored. */
PUBLIC char *
pbacktrack_circ(char *seq)
{
  if (backward_compat_compound) {
    vrna_md_t *md = &(backward_compat_compound->exp_params->model_details);
    if (md->circ && backward_compat_compound->exp_matrices->qm2)
      return vrna_pbacktrack(backward_compat_compound);
  }

  return NULL;
}
/* Deprecated: re-create the Boltzmann factors of the stored fold compound
 * from the current global model settings; no-op if pf_fold() never ran. */
PUBLIC void
update_pf_params(int length)
{
  vrna_md_t md;

  if (!(backward_compat_compound && backward_compat))
    return;

  set_model_details(&md);
  vrna_exp_params_reset(backward_compat_compound, &md);

  /* compatibility with RNAup, may be removed sometime */
  pf_scale = backward_compat_compound->exp_params->pf_scale;
}
/* Deprecated: like update_pf_params(), but substitutes an explicit
 * parameter set when one is supplied instead of the global defaults. */
PUBLIC void
update_pf_params_par(int              length,
                     vrna_exp_param_t *parameters)
{
  vrna_md_t md;

  if (!(backward_compat_compound && backward_compat))
    return;

  if (parameters == NULL) {
    set_model_details(&md);
    vrna_exp_params_reset(backward_compat_compound, &md);
  } else {
    vrna_exp_params_subst(backward_compat_compound, parameters);
  }

  /* compatibility with RNAup, may be removed sometime */
  pf_scale = backward_compat_compound->exp_params->pf_scale;
}
/* Deprecated: centroid structure of the stored ensemble; 'length' is
 * ignored, the stored compound determines the sequence length.
 * NOTE(review): no NULL guard on backward_compat_compound here. */
PUBLIC char *
get_centroid_struct_gquad_pr(int length,
double *dist)
{
return vrna_centroid(backward_compat_compound, dist);
}
/* Deprecated: build a pair probability list from the stored ensemble,
 * keeping only pairs with probability >= cut_off; *pl is set to NULL
 * when no probability data exists. */
PUBLIC void
assign_plist_gquad_from_pr(vrna_ep_t  **pl,
                           int        length, /* ignored */
                           double     cut_off)
{
  *pl = NULL;

  if (backward_compat_compound && backward_compat_compound->exp_matrices->probs)
    *pl = vrna_plist_from_probs(backward_compat_compound, cut_off);
}
/* Deprecated: mean base pair distance of the stored ensemble;
 * 'length' is ignored, the stored compound determines it */
PUBLIC double
mean_bp_distance(int length)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return vrna_mean_bp_distance(backward_compat_compound);
vrna_message_warning("mean_bp_distance: "
"you need to call vrna_pf_fold first");
return 0.; /* only reached when no probability data is available */
}
/**
 * Deprecated: mean base pair distance computed from a caller-supplied
 * probability matrix 'p' (row-wise triangular layout as produced by
 * pf_fold()).
 *
 * Fix: the index array was previously allocated by vrna_idx_row_wise()
 * BEFORE the NULL check on 'p', leaking it on the early-return path.
 * The allocation now happens only after validation.
 */
PUBLIC double
mean_bp_distance_pr(int         length,
                    FLT_OR_DBL  *p)
{
  double  d = 0;
  int     *index;

  if (p == NULL) {
    vrna_message_warning("mean_bp_distance_pr: "
                         "p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()");
    return d;
  }

  index = vrna_idx_row_wise((unsigned int)length);
  d     = wrap_mean_bp_distance(p, length, index, TURN);
  free(index);

  return d;
}
#endif
|
syncbench.c | /***************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 2.0 *
* *
* produced by *
* *
* Mark Bull and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2004. *
* All rights reserved. *
* *
**************************************************************************/
#include <stdio.h>
#include <math.h>
#include <omp.h>
#define OUTERREPS 20
#define CONF95 1.96
int nthreads, delaylength, innerreps;
double times[OUTERREPS+1], reftime, refsd;
void delay(int);
void refer(void);
void referatom(void);
void referred(void);
void testpr(void);
void testfor(void);
void testpfor(void);
void testbar(void);
void testsing(void);
void testcrit(void);
void testlock(void);
void testorder(void);
void testatom(void);
void testred(void);
void stats(double*, double*);
/*
 * Driver: measures the overhead of the main OpenMP synchronization
 * constructs against serial reference timings.
 *
 * Fixes: the parameters were declared as (int argv, char **argc), i.e.
 * with the conventional names swapped; also added the missing return
 * statement for main().
 */
int main (int argc, char **argv)
{
/* determine the thread count once, from inside a parallel region */
#pragma omp parallel
{
#pragma omp master
{
nthreads = omp_get_num_threads();
}
}
printf(" Running OpenMP benchmark on %d thread(s)\n", nthreads);
delaylength = 500;
innerreps = 10000;
/* GENERATE REFERENCE TIME */
refer();
/* TEST PARALLEL REGION */
innerreps = 1000;
testpr();
/* TEST FOR */
testfor();
/* TEST PARALLEL FOR */
testpfor();
/* TEST BARRIER */
testbar();
/* TEST SINGLE */
testsing();
/* TEST CRITICAL*/
innerreps = 100000;
testcrit();
/* TEST LOCK/UNLOCK */
testlock();
/* TEST ORDERED SECTION */
innerreps = 1000;
testorder();
/* GENERATE NEW REFERENCE TIME */
innerreps = 100000;
referatom();
/* TEST ATOMIC */
testatom();
/* GENERATE NEW REFERENCE TIME */
innerreps = 10000;
referred();
/* TEST REDUCTION (1 var) */
innerreps = 1000;
testred();
return 0;
}
/* Reference timing 1: cost of the bare delay() loop with no OpenMP
 * construct; later tests subtract this to report construct overhead */
void refer()
{
int j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing reference time 1\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
for (j=0; j<innerreps; j++){
delay(delaylength);
}
/* per-iteration time in microseconds */
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("Reference_time_1 = %f microseconds +/- %f\n", meantime, CONF95*sd);
reftime = meantime;
refsd = sd;
}
/* Reference timing 2: serial float increment, baseline for the ATOMIC test */
void referatom()
{
int j,k;
double start;
double meantime, sd;
float aaaa;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing reference time 2\n");
for (k=0; k<=OUTERREPS; k++){
aaaa=0;
start = getclock();
for (j=0; j<innerreps; j++){
aaaa += 1;
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
/* never-true print keeps the compiler from optimizing the loop away */
if (aaaa < 0) printf("%f\n",aaaa);
}
stats (&meantime, &sd);
printf("Reference_time_2 = %f microseconds +/- %f\n", meantime, CONF95*sd);
reftime = meantime;
refsd = sd;
}
/* Reference timing 3: delay() plus int increment, baseline for REDUCTION */
void referred()
{
int j,k;
double start;
double meantime, sd;
int aaaa;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing reference time 3\n");
for (k=0; k<=OUTERREPS; k++){
aaaa=0;
start = getclock();
for (j=0; j<innerreps; j++){
delay(delaylength);
aaaa += 1;
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
/* never-true print keeps the compiler from optimizing the loop away */
if (aaaa < 0) printf("%d\n",aaaa);
}
stats (&meantime, &sd);
printf("Reference_time_3 = %f microseconds +/- %f\n", meantime, CONF95*sd);
reftime = meantime;
refsd = sd;
}
/* Measure the cost of entering/leaving a PARALLEL region per iteration */
void testpr()
{
int j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing PARALLEL time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
for (j=0; j<innerreps; j++){
#pragma omp parallel
{
delay(delaylength);
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("PARALLEL time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("PARALLEL overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of a worksharing FOR inside an existing parallel region */
void testfor()
{
int i,j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing FOR time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
#pragma omp parallel private(j)
{
for (j=0; j<innerreps; j++){
/* one loop iteration per thread; i is implicitly private here */
#pragma omp for
for (i=0; i<nthreads; i++){
delay(delaylength);
}
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("FOR time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("FOR overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of a combined PARALLEL FOR started fresh each iteration */
void testpfor()
{
int i,j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing PARALLEL FOR time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
for (j=0; j<innerreps; j++){
#pragma omp parallel for
for (i=0; i<nthreads; i++){
delay(delaylength);
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("PARALLEL FOR time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("PARALLEL FOR overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of an explicit BARRIER per inner iteration */
void testbar()
{
int j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing BARRIER time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
#pragma omp parallel private(j)
{
for (j=0; j<innerreps; j++){
delay(delaylength);
#pragma omp barrier
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("BARRIER time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("BARRIER overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of a SINGLE construct (one thread works, rest wait) */
void testsing()
{
int j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing SINGLE time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
#pragma omp parallel private(j)
{
for (j=0; j<innerreps; j++){
#pragma omp single
delay(delaylength);
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("SINGLE time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("SINGLE overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of a CRITICAL section; work is split across threads
 * (innerreps/nthreads each) so total delay() calls match the reference */
void testcrit()
{
int j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing CRITICAL time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
#pragma omp parallel private(j)
{
for (j=0; j<innerreps/nthreads; j++){
#pragma omp critical
{
delay(delaylength);
}
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("CRITICAL time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("CRITICAL overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/*
 * Measure the cost of omp_set_lock/omp_unset_lock around the delay work;
 * work is split across threads so total delay() calls match the reference.
 *
 * Fix: the lock created with omp_init_lock() was never destroyed; the
 * matching omp_destroy_lock() is now called after all timing loops finish,
 * so the measurement itself is unaffected.
 */
void testlock()
{
int j,k;
double start;
double meantime, sd;
omp_lock_t lock;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing LOCK/UNLOCK time\n");
omp_init_lock(&lock);
for (k=0; k<=OUTERREPS; k++){
start = getclock();
#pragma omp parallel private(j)
{
for (j=0; j<innerreps/nthreads; j++){
omp_set_lock(&lock);
delay(delaylength);
omp_unset_lock(&lock);
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
omp_destroy_lock(&lock);
stats (&meantime, &sd);
printf("LOCK/UNLOCK time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("LOCK/UNLOCK overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of an ORDERED section inside a schedule(static,1) loop */
void testorder()
{
int j,k;
double start;
double meantime, sd;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing ORDERED time\n");
for (k=0; k<=OUTERREPS; k++){
start = getclock();
#pragma omp parallel for ordered schedule (static,1)
for (j=0; j<innerreps; j++){
#pragma omp ordered
delay(delaylength);
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
stats (&meantime, &sd);
printf("ORDERED time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("ORDERED overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of an ATOMIC update; compared against reference time 2 */
void testatom()
{
int j,k;
double start;
double meantime, sd;
float aaaa;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing ATOMIC time\n");
for (k=0; k<=OUTERREPS; k++){
aaaa = 0;
start = getclock();
#pragma omp parallel private(j)
{
for (j=0; j<innerreps/nthreads; j++){
#pragma omp atomic
aaaa += 1;
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
/* never-true print keeps the compiler from optimizing the loop away */
if (aaaa < 0.0) printf("%f\n",aaaa);
}
stats (&meantime, &sd);
printf("ATOMIC time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("ATOMIC overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
/* Measure the cost of a REDUCTION(+) over one scalar; compared against
 * reference time 3 */
void testred()
{
int j,k;
double start;
double meantime, sd;
int aaaa;
double getclock(void);
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing REDUCTION time\n");
for (k=0; k<=OUTERREPS; k++){
aaaa = 0;
start = getclock();
for (j=0; j<innerreps; j++){
#pragma omp parallel reduction(+:aaaa)
{
delay(delaylength);
aaaa += 1;
}
}
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
/* never-true print keeps the compiler from optimizing the loop away */
if (aaaa < 0) printf("%d\n",aaaa);
}
stats (&meantime, &sd);
printf("REDUCTION time = %f microseconds +/- %f\n", meantime, CONF95*sd);
printf("REDUCTION overhead = %f microseconds +/- %f\n", meantime-reftime, CONF95*(sd+refsd));
}
void stats (double *mtp, double *sdp)
{
  /* Summarize times[1..OUTERREPS] (sample 0 is the warm-up and is
     deliberately excluded): print mean, min, max, sample standard
     deviation, and the count of samples more than 3 s.d. from the mean.
     The mean and s.d. are returned through mtp/sdp. */
  double sum, lo, hi, mean, sq_acc, dev, threshold;
  int idx, outliers;
  lo = 1.0e10;
  hi = 0.;
  sum = 0.;
  for (idx = 1; idx <= OUTERREPS; idx++) {
    double t = times[idx];
    if (t < lo) lo = t;
    if (t > hi) hi = t;
    sum += t;
  }
  mean = sum / OUTERREPS;
  /* second pass: sum of squared deviations for the sample s.d. */
  sq_acc = 0;
  for (idx = 1; idx <= OUTERREPS; idx++) {
    double d = times[idx] - mean;
    sq_acc += d * d;
  }
  dev = sqrt(sq_acc / (OUTERREPS - 1));
  /* outliers are counted for reporting only, not removed */
  threshold = 3.0 * dev;
  outliers = 0;
  for (idx = 1; idx <= OUTERREPS; idx++) {
    if (fabs(times[idx] - mean) > threshold) outliers++;
  }
  printf("\n");
  printf("Sample_size Average Min Max S.D. Outliers\n");
  printf(" %d %f %f %f %f %d\n", OUTERREPS, mean, lo, hi, dev, outliers);
  printf("\n");
  *mtp = mean;
  *sdp = dev;
}
|
16_omp_heap.c | // clang-format off
// RUN: %c-to-llvm %omp_c_flags %s | %apply-typeart -S 2>&1 | %filecheck %s
// REQUIRES: openmp
// clang-format on
#include <stdlib.h>
// Exercises TypeArt's OpenMP-aware heap instrumentation: allocations made
// inside parallel regions must be rewritten to the *_omp runtime callbacks
// (the CHECK lines below match the instrumented IR, so the code itself
// must not change).
void foo(int** x) {
#pragma omp parallel // transformed to @__kmpc_fork_call
  {
    // calloc followed by realloc inside the outlined parallel body; the
    // pointer is intentionally discarded -- only the emitted IR matters.
    double* pd = calloc(10, sizeof(double));
    pd = realloc(pd, 20 * sizeof(double));
  }
#pragma omp parallel for
  for (int i = 0; i < 10; ++i) {
    // malloc/free pair per iteration, again inside an outlined OpenMP body.
    x[i] = (int*)malloc(8 * sizeof(int));
    free(x[i]);
  }
}
// CHECK: [[POINTER:%[0-9a-z]+]] = call noalias{{( align [0-9]+)?}} i8* @calloc(i64 [[SIZE:[0-9a-z]+]], i64 8)
// CHECK-NEXT: call void @__typeart_alloc_omp(i8* [[POINTER]], i32 6, i64 [[SIZE]])
// CHECK-NEXT: bitcast i8* [[POINTER]] to double*
// CHECK: __typeart_free_omp(i8* [[POINTER:%[0-9a-z]+]])
// CHECK-NEXT: [[POINTER2:%[0-9a-z]+]] = call{{( align [0-9]+)?}} i8* @realloc(i8* [[POINTER]], i64 160)
// CHECK-NEXT: __typeart_alloc_omp(i8* [[POINTER2]], i32 6, i64 20)
// CHECK: [[POINTER:%[0-9a-z]+]] = call noalias{{( align [0-9]+)?}} i8* @malloc
// CHECK-NEXT: call void @__typeart_alloc_omp(i8* [[POINTER]], i32 2, i64 8)
// CHECK-NEXT: bitcast i8* [[POINTER]] to i32*
// CHECK: call void @free
// CHECK-NEXT: call void @__typeart_free_omp
// CHECK: TypeArtPass [Heap]
// CHECK-NEXT: Malloc{{[ ]*}}:{{[ ]*}}3
// CHECK-NEXT: Free{{[ ]*}}:{{[ ]*}}1
// CHECK-NEXT: Alloca{{[ ]*}}:{{[ ]*}}0 |
GB_unop__isfinite_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fp64)
// op(A') function: GB (_unop_tran__isfinite_bool_fp64)
// C type: bool
// A type: double
// cast: double cij = (aij)
// unaryop: cij = isfinite (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isfinite (x) ;
// casting
#define GB_CAST(z, aij) \
double z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (aij) ; \
Cx [pC] = isfinite (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = isfinite ((double) Ax [p]).
// Handles both the full case (Ab == NULL) and the bitmap case, where only
// entries with Ab [p] != 0 are present.  Returns GrB_NO_VALUE when this
// kernel is compiled out via GB_DISABLE.  (Auto-generated file: do not
// change the code itself.)
GrB_Info GB (_unop_apply__isfinite_bool_fp64)
(
    bool *Cx,                    // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isfinite (A'): transpose, typecast double->bool, and apply the
// operator.  The actual loop body lives in the shared template
// GB_unop_transpose.c, which is specialized here by the GB_* macros above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_unop_tran__isfinite_bool_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
test_verify_rowcols.c | #include "config.h"
/* getopt needs _POSIX_C_SOURCE 2 */
#define _POSIX_C_SOURCE 2
#include <ctype.h>
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#if defined(_MSC_VER)
#include "wingetopt/src/getopt.h"
#else
#include <unistd.h>
#endif
#include "parasail.h"
#include "parasail/cpuid.h"
#include "parasail/io.h"
#include "parasail/memory.h"
#include "parasail/matrix_lookup.h"
#include "func_verify_rowcols.h"
static int verbose = 0;
/* A gap penalty: cost to open a gap and cost to extend it by one. */
typedef struct gap_score {
    int open;
    int extend;
} gap_score_t;

/* Default (open, extend) pairs exercised by check_functions when the user
 * does not supply -o/-e; the {INT_MIN, INT_MIN} entry is the terminator. */
gap_score_t gap_scores[] = {
    {10,1},
    {10,2},
    {14,2},
    {40,2},
    {INT_MIN,INT_MIN}
};
/* Compute the binomial coefficient "n choose k".
 * Returns 0 when k > n.  Multiplies the k descending factors while
 * dividing stepwise by d, which keeps intermediates exact (a product of d
 * consecutive integers is divisible by d!).  Result is undefined on
 * overflow of unsigned long for very large n. */
static inline unsigned long binomial_coefficient(
        unsigned long n,
        unsigned long k)
{
    /* from http://blog.plover.com/math/choose.html */
    unsigned long r = 1;
    unsigned long d;
    if (k > n) {
        return 0;
    }
    /* C(n,k) == C(n,n-k); iterate over the smaller of the two to halve
     * the work and reduce intermediate-overflow exposure. */
    if (k > n - k) {
        k = n - k;
    }
    for (d = 1; d <= k; d++) {
        r *= n--;
        r /= d;
    }
    return r;
}
static inline void k_combination2(
unsigned long pos,
unsigned long *a,
unsigned long *b)
{
double s;
double i = floor(sqrt(2.0 * pos)) - 1.0;
if (i <= 1.0) {
i = 1.0;
}
s = i * (i - 1.0) / 2.0;
while (pos - s >= i) {
s += i;
i += 1;
}
*a = (unsigned long)(pos - s);
*b = (unsigned long)(i);
}
/* Return 1 if the two int arrays differ anywhere in the first 'size'
 * elements, 0 if they are identical (or size is 0).  Byte comparison is
 * equivalent to elementwise comparison for int arrays. */
static inline int diff_array(
    unsigned long size,
    int *a,
    int *b)
{
    return memcmp(a, b, size * sizeof(int)) != 0;
}
/* Print one mismatch report.  Called from inside the OpenMP parallel
 * region, so output is serialized under the same named critical section
 * used by every other reporter in this file. */
static void report_mismatch(
        const char *funcname,
        unsigned long a,
        unsigned long b,
        int open,
        int extend,
        const char *matrixname,
        const char *what)
{
#pragma omp critical(printer)
    {
        printf("%s(%lu,%lu,%d,%d,%s) %s\n",
                funcname, a, b, open, extend, matrixname, what);
    }
}

/* Compare every non-reference function in group f against the group's
 * reference implementation (functions[0]) over all sequence pairs, all
 * substitution matrices (or the single user-specified matrix_), and all
 * default gap penalties (or the single user-specified gap).  Scores and
 * the last-row/last-column vectors are compared; for stats variants the
 * matches/similar/length row/col vectors are compared as well.
 * Refactored: the seven duplicated critical-section printf blocks are
 * now one helper (report_mismatch); output is unchanged. */
static void check_functions(
        parasail_function_group_t f,
        parasail_sequences_t *sequences,
        unsigned long pair_limit_,
        const parasail_matrix_t *matrix_,
        gap_score_t gap)
{
    const parasail_function_info_t *functions = f.fs;
    unsigned long matrix_index = 0;
    unsigned long gap_index = 0;
    unsigned long function_index = 0;
    long long pair_index = 0;
    long long pair_limit = (long long)pair_limit_;
    parasail_function_t *reference_function = NULL;
    const parasail_matrix_t ** matrices = parasail_matrices;
    const parasail_matrix_t * single_matrix[] = {
        matrix_,
        NULL
    };
    /* a user-specified matrix overrides the built-in lookup table */
    if (NULL != matrix_) {
        matrices = single_matrix;
    }
    printf("checking %s functions\n", f.name);
    for (matrix_index=0; NULL!=matrices[matrix_index]; ++matrix_index) {
        const parasail_matrix_t *matrix = matrices[matrix_index];
        const char *matrixname = matrix->name;
        if (verbose) printf("\t%s\n", matrixname);
        for (gap_index=0; INT_MIN!=gap_scores[gap_index].open; ++gap_index) {
            int open = gap_scores[gap_index].open;
            int extend = gap_scores[gap_index].extend;
            /* a user-specified gap overrides the default table */
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                open = gap.open;
                extend = gap.extend;
            }
            if (verbose) printf("\t\topen=%d extend=%d\n", open, extend);
            reference_function = functions[0].pointer;
            for (function_index=1;
                    NULL!=functions[function_index].pointer;
                    ++function_index) {
                unsigned long saturated = 0;
                const char *funcname = functions[function_index].name;
                if (verbose) printf("\t\t\t%s\n", funcname);
#pragma omp parallel for
                for (pair_index=0; pair_index<pair_limit; ++pair_index) {
                    parasail_result_t *reference_result = NULL;
                    parasail_result_t *result = NULL;
                    unsigned long a = 0;
                    unsigned long b = 1;
                    size_t size_a = 0;
                    size_t size_b = 0;
                    /* map flat pair index onto a sequence pair (a,b) */
                    k_combination2(pair_index, &a, &b);
                    size_a = sequences->seqs[a].seq.l;
                    size_b = sequences->seqs[b].seq.l;
                    reference_result = reference_function(
                            sequences->seqs[a].seq.s, size_a,
                            sequences->seqs[b].seq.s, size_b,
                            open, extend,
                            matrix);
                    result = functions[function_index].pointer(
                            sequences->seqs[a].seq.s, size_a,
                            sequences->seqs[b].seq.s, size_b,
                            open, extend,
                            matrix);
                    if (parasail_result_is_saturated(result)) {
                        /* no point in comparing a result that saturated */
                        parasail_result_free(reference_result);
                        parasail_result_free(result);
#pragma omp atomic
                        saturated += 1;
                        continue;
                    }
                    /* score mismatch has its own format (both scores shown) */
                    if (reference_result->score != result->score) {
#pragma omp critical(printer)
                        {
                            printf("%s(%lu,%lu,%d,%d,%s) wrong score (%d!=%d)\n",
                                    funcname,
                                    a, b, open, extend,
                                    matrixname,
                                    reference_result->score, result->score);
                        }
                    }
                    if (diff_array(size_b,
                                parasail_result_get_score_row(reference_result),
                                parasail_result_get_score_row(result))) {
                        report_mismatch(funcname, a, b, open, extend,
                                matrixname, "bad score row");
                    }
                    if (diff_array(size_a,
                                parasail_result_get_score_col(reference_result),
                                parasail_result_get_score_col(result))) {
                        report_mismatch(funcname, a, b, open, extend,
                                matrixname, "bad score col");
                    }
                    if (parasail_result_is_stats(result)) {
                        if (diff_array(size_b,
                                    parasail_result_get_matches_row(reference_result),
                                    parasail_result_get_matches_row(result))) {
                            report_mismatch(funcname, a, b, open, extend,
                                    matrixname, "bad matches row");
                        }
                        if (diff_array(size_a,
                                    parasail_result_get_matches_col(reference_result),
                                    parasail_result_get_matches_col(result))) {
                            report_mismatch(funcname, a, b, open, extend,
                                    matrixname, "bad matches col");
                        }
                        if (diff_array(size_b,
                                    parasail_result_get_similar_row(reference_result),
                                    parasail_result_get_similar_row(result))) {
                            report_mismatch(funcname, a, b, open, extend,
                                    matrixname, "bad similar row");
                        }
                        if (diff_array(size_a,
                                    parasail_result_get_similar_col(reference_result),
                                    parasail_result_get_similar_col(result))) {
                            report_mismatch(funcname, a, b, open, extend,
                                    matrixname, "bad similar col");
                        }
                        if (diff_array(size_b,
                                    parasail_result_get_length_row(reference_result),
                                    parasail_result_get_length_row(result))) {
                            report_mismatch(funcname, a, b, open, extend,
                                    matrixname, "bad length row");
                        }
                        if (diff_array(size_a,
                                    parasail_result_get_length_col(reference_result),
                                    parasail_result_get_length_col(result))) {
                            report_mismatch(funcname, a, b, open, extend,
                                    matrixname, "bad length col");
                        }
                    }
                    parasail_result_free(reference_result);
                    parasail_result_free(result);
                }
                if (verbose && saturated) {
                    printf("%s %d %d %s saturated %lu times\n",
                            functions[function_index].name,
                            open, extend,
                            matrixname,
                            saturated);
                }
            }
            if (gap.open != INT_MIN && gap.extend != INT_MIN) {
                /* user-specified gap, don't loop */
                break;
            }
        }
    }
}
/* Driver: parse options (-f file, -m matrix, -n count, -o/-e gap, -v
 * verbose, -s enable stats tests, -S disable score tests), load the
 * sequences, then run the full rowcol verification battery for every
 * instruction set available at build+run time, plus the dispatcher.
 * Refactored: the 22 check_functions calls formerly duplicated per ISA
 * are generated by one token-pasting macro; behavior is unchanged. */
int main(int argc, char **argv)
{
    unsigned long seq_count = 0;
    unsigned long limit = 0;
    parasail_sequences_t *sequences = NULL;
    char *endptr = NULL;
    char *filename = NULL;
    int c = 0;
    int test_scores = 1;
    int test_stats = 0;
    char *matrixname = NULL;
    const parasail_matrix_t *matrix = NULL;
    gap_score_t gap = {INT_MIN,INT_MIN};

/* Run the full battery of rowcol function groups for one implementation
 * suffix (sse2, sse41, avx2, altivec, neon, disp). */
#define CHECK_SUITE(suffix) \
    do { \
        if (test_scores) { \
            check_functions(parasail_nw_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qb_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qe_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qx_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_db_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_de_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_dx_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qb_de_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qe_db_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sw_rowcol_##suffix, sequences, limit, matrix, gap); \
        } \
        if (test_stats) { \
            check_functions(parasail_nw_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qb_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qe_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qx_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_db_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_de_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_dx_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qb_de_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sg_qe_db_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
            check_functions(parasail_sw_stats_rowcol_##suffix, sequences, limit, matrix, gap); \
        } \
    } while (0)

    while ((c = getopt(argc, argv, "f:m:n:o:e:vsS")) != -1) {
        switch (c) {
            case 'f':
                filename = optarg;
                break;
            case 'm':
                matrixname = optarg;
                break;
            case 'n':
                errno = 0;
                seq_count = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'o':
                errno = 0;
                gap.open = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.open");
                    exit(1);
                }
                break;
            case 'e':
                errno = 0;
                gap.extend = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.extend");
                    exit(1);
                }
                break;
            case 'v':
                verbose = 1;
                break;
            case 's':
                test_stats = 1;
                break;
            case 'S':
                test_scores = 0;
                break;
            case '?':
                if (optopt == 'f' || optopt == 'n') {
                    fprintf(stderr,
                            "Option -%c requires an argument.\n",
                            optopt);
                }
                else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option `-%c'.\n",
                            optopt);
                }
                else {
                    fprintf(stderr,
                            "Unknown option character `\\x%x'.\n",
                            optopt);
                }
                exit(1);
            default:
                fprintf(stderr, "default case in getopt\n");
                exit(1);
        }
    }

    if (filename) {
        sequences = parasail_sequences_from_file(filename);
        /* default: test all pairs of every sequence in the file */
        if (0 == seq_count) {
            seq_count = sequences->l;
        }
    }
    else {
        fprintf(stderr, "no filename specified\n");
        exit(1);
    }

    /* select the matrix */
    if (matrixname) {
        matrix = parasail_matrix_lookup(matrixname);
        if (NULL == matrix) {
            fprintf(stderr, "Specified substitution matrix not found.\n");
            exit(1);
        }
    }

    /* number of unordered sequence pairs to verify */
    limit = binomial_coefficient(seq_count, 2);
    printf("%lu choose 2 is %lu\n", seq_count, limit);

#if HAVE_SSE2
    if (parasail_can_use_sse2()) {
        CHECK_SUITE(sse2);
    }
#endif
#if HAVE_SSE41
    if (parasail_can_use_sse41()) {
        CHECK_SUITE(sse41);
    }
#endif
#if HAVE_AVX2
    if (parasail_can_use_avx2()) {
        CHECK_SUITE(avx2);
    }
#endif
#if HAVE_ALTIVEC
    if (parasail_can_use_altivec()) {
        CHECK_SUITE(altivec);
    }
#endif
#if HAVE_NEON
    if (parasail_can_use_neon()) {
        CHECK_SUITE(neon);
    }
#endif
    /* dispatching implementations are always available */
    CHECK_SUITE(disp);
#undef CHECK_SUITE

    parasail_sequences_free(sequences);
    return 0;
}
|
mkldnn_common.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file mkldnn_common.h
* \brief Common header file for MKLDNN backend subgraph
* \author Ciyong Chen
*/
#ifndef MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#define MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#if MXNET_USE_ONEDNN == 1
#include <vector>
namespace mxnet {
namespace op {
// Compute int8 quantization scales for a convolution/FC weight tensor.
// Scans per-output-channel min/max of the weights, then either:
//   - channelwise: returns one scale per channel (size == #channels), each
//     capped so that (bias / scale / data_scale) stays within INT32_MAX/2
//     to avoid overflowing the int32 bias in MKLDNN; or
//   - tensorwise: returns {scale, total_min, total_max} (size == 3).
// NOTE(review): assumes weight layout is (out_channels, ...) so that
// wshape[0] is the channel axis -- confirm against callers.
template <typename DType>
static std::vector<float> GetWeightScales(const NDArray &weight, const NDArray *bias,
                                          const float data_scale, bool weight_channelwise_scale) {
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  std::vector<float> weight_scales;
  const DType *weight_ptr = weight.data().dptr<DType>();
  const DType *bias_ptr = bias? bias->data().dptr<DType>() : nullptr;
  const auto wshape = weight.shape();
  size_t channel = wshape[0];
  // number of weight elements per output channel
  size_t offset = wshape.ProdShape(1, wshape.ndim());
  // per-channel running min/max, seeded with the type extremes
  std::vector<DType> weight_c_min(channel, MaxValue<DType>());
  std::vector<DType> weight_c_max(channel, MinValue<DType>());
  for (int c = 0; c < static_cast<int>(channel); ++c) {
    const DType *p1 = weight_ptr + c * offset;
    for (size_t k = 0; k < offset; ++k) {
      if (weight_c_min[c] > p1[k])
        weight_c_min[c] = p1[k];
      if (weight_c_max[c] < p1[k])
        weight_c_max[c] = p1[k];
    }
  }

  if (weight_channelwise_scale) {
    weight_scales.resize(channel);
#pragma omp parallel for num_threads(nthreads)
    for (int c = 0; c < static_cast<int>(channel); ++c) {
      float scale = GetQuantizeScale(mshadow::kInt8, weight_c_min[c], weight_c_max[c]);
      if (bias_ptr && bias_ptr[c]) {
        // avoid overflow on bias
        // TODO(zhennan): mkldnn has bug to handle INT_MAX in bias, so set the maximum value of bias
        // to INT_MAX / 2.
        float scale_max =
            static_cast<float>(bias_ptr[c] > 0 ? MaxValue<int32_t>() : MinValue<int32_t>()) / 2 /
            bias_ptr[c] / data_scale;
        scale = Min(scale, scale_max);
      }
      weight_scales[c] = scale;
    }
  } else {
    // tensorwise: collapse the per-channel extremes into one global range
    DType total_min = weight_c_min[0];
    DType total_max = weight_c_max[0];
    for (size_t c = 0; c < channel; ++c) {
      if (total_min > weight_c_min[c]) total_min = weight_c_min[c];
      if (total_max < weight_c_max[c]) total_max = weight_c_max[c];
    }
    // [scale, min, max] -- callers rely on this 3-element layout
    weight_scales.resize(3);
    weight_scales[0] = GetQuantizeScale(mshadow::kInt8, total_min, total_max);
    weight_scales[1] = total_min;
    weight_scales[2] = total_max;
  }
  return weight_scales;
}
// Reorder (and quantize, via output scales) weight/bias NDArrays into the
// MKLDNN memory formats given by weight_md / bias_md, replacing *weight and
// *bias in place.  Reorder primitives are registered on the shared
// MKLDNNStream; when submit is true the stream is flushed here, otherwise
// the caller is responsible for submitting before using the outputs.
// NOTE(review): *weight/*bias are overwritten before submission when
// submit == false -- callers must not read the old arrays afterwards.
static void ConvertWeightBias2MKLDNN(NDArray *weight, NDArray *bias, bool has_bias,
                                     const mkldnn::memory::desc &weight_md,
                                     const mkldnn::memory::desc *bias_md,
                                     const int num_group, float data_scale,
                                     const std::vector<float> &weight_scales,
                                     const bool submit = true) {
  MKLDNNStream *stream = MKLDNNStream::Get();
  const auto new_weight = NDArray(weight_md);
  const auto conv_weights_memory = new_weight.GetMKLDNNData();
  mkldnn::primitive_attr weight_attr;
  if (weight_scales.size()) {
    // mask 0 = one scale for the whole tensor, 1 = per-output-channel
    const int weight_mask = (weight_scales.size()) == 1 ? 0 : 1;
    weight_attr.set_output_scales(weight_mask, weight_scales);
  }
  auto default_weights_memory = GetWeights(*weight, num_group);
  if (default_weights_memory == nullptr) default_weights_memory = weight->GetMKLDNNData();
  const auto weight_reorder_pd =
      mkldnn::reorder::primitive_desc(*default_weights_memory, *conv_weights_memory, weight_attr);
  MKLDNNStream::Get()->RegisterPrimArgs(
      mkldnn::reorder(weight_reorder_pd),
      {{MKLDNN_ARG_FROM, *default_weights_memory}, {MKLDNN_ARG_TO, *conv_weights_memory}});

  NDArray new_bias;
  if (has_bias && data_scale) {
    // bias is quantized with (weight_scale * data_scale) per channel
    std::vector<float> bias_scales(weight_scales.size());
    for (size_t c = 0; c < weight_scales.size(); ++c) {
      bias_scales[c] = weight_scales[c] * data_scale;
    }
    new_bias = NDArray(*bias_md);
    const auto conv_bias_memory = new_bias.GetMKLDNNData();
    const int bias_mask = (bias_scales.size()) == 1 ? 0 : 1;
    mkldnn::primitive_attr bias_attr;
    bias_attr.set_output_scales(bias_mask, bias_scales);
    auto bias_weights_memory = bias->GetMKLDNNData();
    const auto bias_reorder_pd =
        mkldnn::reorder::primitive_desc(*bias_weights_memory, *conv_bias_memory, bias_attr);
    MKLDNNStream::Get()->RegisterPrimArgs(
        mkldnn::reorder(bias_reorder_pd),
        {{MKLDNN_ARG_FROM, *bias_weights_memory}, {MKLDNN_ARG_TO, *conv_bias_memory}});
  }
  if (submit)
    stream->Submit();
  *weight = new_weight;
  if (has_bias && data_scale) *bias = new_bias;
}
} // namespace op
} // namespace mxnet
#endif // if MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
|
neuralClasses.h | #pragma once
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>
#include <boost/unordered_map.hpp>
#include <Eigen/Dense>
#include "maybe_omp.h"
#include "util.h"
#include "graphClasses.h"
#include "USCMatrix.h"
// classes for various kinds of layers
#include "SoftmaxLoss.h"
#include "Activation_function.h"
//#define EIGEN_DONT_PARALLELIZE
//#define EIGEN_DEFAULT_TO_ROW_MAJOR
using namespace std;
namespace nplm
{
// is this cheating?
using Eigen::Matrix;
using Eigen::Array;
using Eigen::MatrixBase;
using Eigen::Dynamic;
typedef boost::unordered_map<int,bool> int_map;
// Functor that clips a value to the interval [-5, 5]; applied elementwise
// to bound gradient updates.  The std::max/std::min call order is kept so
// edge-case (NaN) behavior matches the original expression exactly.
struct Clipper{
    double operator() (double x) const {
        const double bounded_below = std::max(x, -5.);
        return std::min(5., bounded_below);
    }
};
class Linear_layer
{
private:
Matrix<double,Dynamic,Dynamic> U;
Matrix<double,Dynamic,Dynamic> U_gradient;
Matrix<double,Dynamic,Dynamic> U_velocity;
Matrix<double,Dynamic,Dynamic> U_running_gradient;
Matrix<double,Dynamic,Dynamic> U_running_parameter_update;
// Biases
Matrix<double,Dynamic,1> b;
Matrix<double,Dynamic,1> b_velocity;
Matrix<double,Dynamic,1> b_running_gradient;
Matrix<double,Dynamic,1> b_running_parameter_update;
Matrix<double,Dynamic,1> b_gradient;
friend class model;
public:
Linear_layer() { }
Linear_layer(int rows, int cols) { resize(rows, cols); }
void resize(int rows, int cols)
{
U.setZero(rows, cols);
U_gradient.setZero(rows, cols);
//U_running_gradient.setZero(rows, cols);
//U_running_parameter_updates.setZero(rows, cols);
//U_velocity.setZero(rows, cols);
b.resize(rows);
b_gradient.setZero(rows);
//b_running_gradient.resize(rows);
//b_velocity.resize(rows);
}
void read_weights(std::ifstream &U_file) { readMatrix(U_file, U); }
void write_weights(std::ofstream &U_file) { writeMatrix(U, U_file); }
void read_biases(std::ifstream &b_file) { readMatrix(b_file, b); }
void write_biases(std::ofstream &b_file) { writeMatrix(b, b_file); }
template <typename Engine>
void initialize(Engine &engine,
bool init_normal,
double init_range,
string ¶meter_update,
double adagrad_epsilon)
{
if (parameter_update == "ADA") {
U_running_gradient = Matrix<double,Dynamic,Dynamic>::Ones(U.rows(),U.cols())*adagrad_epsilon;
b_running_gradient = Matrix<double,Dynamic,1>::Ones(b.size())*adagrad_epsilon;
}
if (parameter_update == "ADAD") {
U_running_gradient.setZero(U.rows(),U.cols());
b_running_gradient.setZero(b.size());
U_running_parameter_update.setZero(U.rows(),U.cols());
b_running_parameter_update.setZero(b.size());
}
initMatrix(engine, U, init_normal, init_range);
initBias(engine, b, init_normal, init_range);
}
int n_inputs () const { return U.cols(); }
int n_outputs () const { return U.rows(); }
template <typename DerivedIn, typename DerivedOut>
void fProp(const MatrixBase<DerivedIn> &input,
const MatrixBase<DerivedOut> &output) const
{
UNCONST(DerivedOut, output, my_output);
my_output.leftCols(input.cols()).noalias() = U*input;
int num_examples = input.cols();
for (int example = 0;example < num_examples;example++)
{
my_output.leftCols(input.cols()).col(example) += b;
}
}
// Sparse input
template <typename ScalarIn, typename DerivedOut>
void fProp(const USCMatrix<ScalarIn> &input,
const MatrixBase<DerivedOut> &output_const) const
{
UNCONST(DerivedOut, output_const, output);
output.setZero();
uscgemm(1.0, U, input, output.leftCols(input.cols()));
// Each column corresponds to a training example. We
// parallelize the adding of biases per dimension.
int num_examples = input.cols();
for (int example = 0;example < num_examples;example++)
{
output.leftCols(input.cols()).col(example) += b;
}
}
template <typename DerivedGOut, typename DerivedGIn>
void bProp(const MatrixBase<DerivedGOut> &input,
MatrixBase<DerivedGIn> &output) const
{
UNCONST(DerivedGIn, output, my_output);
my_output.noalias() = U.transpose()*input;
}
template <typename DerivedGOut, typename DerivedIn>
void computeGradient( const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &fProp_input,
double learning_rate, double momentum, double L2_reg)
{
U_gradient.noalias() = bProp_input*fProp_input.transpose();
// get the bias gradient for all dimensions in parallel
int size = b.size();
b_gradient = bProp_input.rowwise().sum();
// This used to be multithreaded, but there was no measureable difference
if (L2_reg > 0.0)
{
U_gradient -= 2*L2_reg*U;
b_gradient -= 2*L2_reg*b;
}
if (momentum > 0.0)
{
U_velocity = momentum*U_velocity + U_gradient;
U += learning_rate * U_velocity;
b_velocity = momentum*b_velocity + b_gradient;
b += learning_rate * b_velocity;
}
else
{
/*
U += learning_rate * U_gradient;
b += learning_rate * b_gradient;
*/
//UPDATE CLIPPING
U += (learning_rate*U_gradient).array().unaryExpr(Clipper()).matrix();
b += (learning_rate*b_gradient).array().unaryExpr(Clipper()).matrix();
//GRADIENT CLIPPING
//U += learning_rate*(U_gradient.array().unaryExpr(Clipper())).matrix();
//b += learning_rate*(b_gradient.array().unaryExpr(Clipper())).matrix();
}
}
template <typename DerivedGOut, typename DerivedIn>
void computeGradientAdagrad(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &fProp_input,
double learning_rate,
double L2_reg)
{
U_gradient.noalias() = bProp_input*fProp_input.transpose();
// get the bias gradient for all dimensions in parallel
int size = b.size();
b_gradient.noalias() = bProp_input.rowwise().sum();
if (L2_reg != 0)
{
U_gradient -= 2*L2_reg*U;
b_gradient -= 2*L2_reg*b;
}
// ignore momentum?
#pragma omp parallel for
for (int col=0; col<U.cols(); col++) {
U_running_gradient.col(col) += U_gradient.col(col).array().square().matrix();
U.col(col) += learning_rate * (U_gradient.col(col).array() /
U_running_gradient.col(col).array().sqrt()).matrix();
/*
//UPDATE CLIPPING
U.col(col) += (learning_rate * (U_gradient.col(col).array() / U_running_gradient.col(col).array().sqrt())).
unaryExpr(Clipper()).matrix();
*/
}
b_running_gradient += b_gradient.array().square().matrix();
b += learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt()).matrix();
/*
//UPDATE CLIPPING
b += (learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt())).unaryExpr(Clipper()).matrix();
*/
}
template <typename DerivedGOut, typename DerivedIn>
void computeGradientAdadelta(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &fProp_input,
double learning_rate,
double L2_reg,
double conditioning_constant,
double decay)
{
//cerr<<"decay is "<<decay<<" and conditioning constant is "<<conditioning_constant<<endl;
U_gradient.noalias() = bProp_input*fProp_input.transpose();
Array<double,Dynamic,1> b_current_parameter_update;
// get the bias gradient for all dimensions in parallel
int size = b.size();
b_gradient.noalias() = bProp_input.rowwise().sum();
if (L2_reg != 0)
{
U_gradient -= 2*L2_reg*U;
b_gradient -= 2*L2_reg*b;
}
// ignore momentum?
#pragma omp parallel for
//cerr<<"U gradient is "<<U_gradient<<endl;
for (int col=0; col<U.cols(); col++) {
Array<double,Dynamic,1> U_current_parameter_update;
U_running_gradient.col(col) = decay*U_running_gradient.col(col) +
(1-decay)*U_gradient.col(col).array().square().matrix();
//cerr<<"U running gradient is "<<U_running_gradient.col(col)<<endl;
//getchar();
U_current_parameter_update = ((U_running_parameter_update.col(col).array()+conditioning_constant).sqrt()/
(U_running_gradient.col(col).array()+conditioning_constant).sqrt()) *
U_gradient.col(col).array();
//cerr<<"U current parameter update is "<<U_current_parameter_update<<endl;
//getchar();
//update the running parameter update
U_running_parameter_update.col(col) = decay*U_running_parameter_update.col(col) +
(1.-decay)*U_current_parameter_update.square().matrix();
U.col(col) += learning_rate*U_current_parameter_update.matrix();
}
b_running_gradient = decay*b_running_gradient +
(1.-decay)*b_gradient.array().square().matrix();
b_current_parameter_update = ((b_running_parameter_update.array()+conditioning_constant).sqrt()/
(b_running_gradient.array()+conditioning_constant).sqrt()) *
b_gradient.array();
b_running_parameter_update = decay*(b_running_parameter_update) +
(1.-decay)*b_current_parameter_update.square().matrix();
b += learning_rate*b_current_parameter_update.matrix();
}
template <typename DerivedGOut, typename DerivedIn, typename DerivedGW>
void computeGradientCheck(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &fProp_input,
const MatrixBase<DerivedGW> &gradient) const
{
UNCONST(DerivedGW, gradient, my_gradient);
my_gradient.noalias() = bProp_input*fProp_input.transpose();
}
};
// Output layer: per-word scores s = (*W)*h + b, where W holds one output
// embedding per vocabulary word and b per-word biases.  W is held by
// pointer so input and output word embeddings can be shared between
// layers.  Provides dense and sampled (sparse) forward/backward passes
// plus SGD, Adagrad and Adadelta updates.
class Output_word_embeddings
{
    private:
        // Row-major storage is better for uscgemm.
        Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> *W; // vocab_size x embedding_dim; not owned
        std::vector<double> W_data;
        Matrix<double,Dynamic,1> b;                        // per-word bias, vocab_size x 1
        Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> W_running_gradient;
        Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> W_gradient;
        Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> W_running_parameter_update;
        Matrix<double,Dynamic,1> b_running_gradient;
        Matrix<double,Dynamic,1> b_gradient;
        Matrix<double,Dynamic,1> b_running_parameter_update;

    public:
        Output_word_embeddings() { }
        Output_word_embeddings(int rows, int cols) { resize(rows, cols); }

        // NOTE(review): resize() dereferences W, so set_W() must have been
        // called first — confirm call order at construction sites.
        void resize(int rows, int cols)
        {
            W->setZero(rows, cols);
            b.setZero(rows);
        }

        void set_W(Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> *input_W) {
            W = input_W;
        }

        void read_weights(std::ifstream &W_file) { readMatrix(W_file, *W); }
        void write_weights(std::ofstream &W_file) { writeMatrix(*W, W_file); }
        void read_biases(std::ifstream &b_file) { readMatrix(b_file, b); }
        void write_biases(std::ofstream &b_file) { writeMatrix(b, b_file); }

        // Randomly initialize *W, fill b with init_bias, and set up the
        // accumulators for the chosen update rule
        // ("ADA" = Adagrad, "ADAD" = Adadelta).
        template <typename Engine>
        void initialize(Engine &engine,
            bool init_normal,
            double init_range,
            double init_bias,
            string &parameter_update,
            double adagrad_epsilon)
        {
            W_gradient.setZero(W->rows(),W->cols());
            b_gradient.setZero(b.size());
            if (parameter_update == "ADA") {
                // Seed with epsilon so the first divide-by-sqrt is well defined.
                W_running_gradient = Matrix<double,Dynamic,Dynamic>::Ones(W->rows(),W->cols())*adagrad_epsilon;
                b_running_gradient = Matrix<double,Dynamic,1>::Ones(b.size())*adagrad_epsilon;
            }
            if (parameter_update == "ADAD") {
                W_running_gradient.setZero(W->rows(),W->cols());
                b_running_gradient.setZero(b.size());
                W_gradient.setZero(W->rows(),W->cols());
                b_running_parameter_update.setZero(b.size());
            }
            initMatrix(engine, *W, init_normal, init_range);
            b.fill(init_bias);
        }

        int n_inputs () const { return W->cols(); }
        int n_outputs () const { return W->rows(); }

        // Dense forward: scores for every vocabulary word.
        template <typename DerivedIn, typename DerivedOut>
        void fProp(const MatrixBase<DerivedIn> &input,
            const MatrixBase<DerivedOut> &output) const
        {
            UNCONST(DerivedOut, output, my_output);
            my_output = ((*W) * input).colwise() + b;
        }

        // Sampled (sparse-output) forward: scores only for the words listed
        // in `samples` (samples(k, i) is the k-th sampled word of instance i).
        template <typename DerivedIn, typename DerivedOutI, typename DerivedOutV>
        void fProp(const MatrixBase<DerivedIn> &input,
            const MatrixBase<DerivedOutI> &samples,
            const MatrixBase<DerivedOutV> &output) const
        {
            UNCONST(DerivedOutV, output, my_output);
            // Start from the sampled words' biases...
            #pragma omp parallel for
            for (int instance_id = 0; instance_id < samples.cols(); instance_id++)
            {
                for (int sample_id = 0; sample_id < samples.rows(); sample_id++)
                {
                    my_output(sample_id, instance_id) = b(samples(sample_id, instance_id));
                }
            }
            // ...then add the masked product of W with the input.
            USCMatrix<double> sparse_output(W->rows(), samples, my_output);
            uscgemm_masked(1.0, *W, input, sparse_output);
            my_output = sparse_output.values; // too bad, so much copying
        }

        // Score of a single word for a single instance.
        template <typename DerivedIn>
        double fProp(const MatrixBase<DerivedIn> &input,
            int word,
            int instance) const
        {
            return W->row(word).dot(input.col(instance)) + b(word);
        }

        // Dense backward (log-likelihood loss): bProp_matrix = W^T * input.
        template <typename DerivedGOut, typename DerivedGIn>
        void bProp(const MatrixBase<DerivedGOut> &input_bProp_matrix,
            const MatrixBase<DerivedGIn> &bProp_matrix) const
        {
            // W is vocab_size x output_embedding_dimension
            // input_bProp_matrix is vocab_size x minibatch_size
            // bProp_matrix is output_embedding_dimension x minibatch_size
            UNCONST(DerivedGIn, bProp_matrix, my_bProp_matrix);
            my_bProp_matrix.leftCols(input_bProp_matrix.cols()).noalias() =
                W->transpose() * input_bProp_matrix;
        }

        // Dense SGD update with elementwise update clipping to [-5, 5].
        template <typename DerivedIn, typename DerivedGOut>
        void computeGradient(const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOut> &bProp_input,
            double learning_rate,
            double momentum) // momentum is currently unused here
        {
            // W is vocab_size x output_embedding_dimension
            // b is vocab_size x 1
            // predicted_embeddings is output_embedding_dimension x minibatch_size
            // bProp_input is vocab_size x minibatch_size
            // UPDATE CLIPPING: clip the scaled update, not the raw gradient.
            W->noalias() += (learning_rate *
                (bProp_input * predicted_embeddings.transpose())).array().unaryExpr(Clipper()).matrix();
            b += (learning_rate * (bProp_input.rowwise().sum())).array().unaryExpr(Clipper()).matrix();
        }

        // Dense Adagrad update.
        template <typename DerivedIn, typename DerivedGOut>
        void computeGradientAdagrad(
            const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOut> &bProp_input,
            double learning_rate)
        {
            W_gradient.setZero(W->rows(), W->cols());
            b_gradient.setZero(b.size());
            W_gradient.noalias() = bProp_input * predicted_embeddings.transpose();
            b_gradient.noalias() = bProp_input.rowwise().sum();
            W_running_gradient += W_gradient.array().square().matrix();
            b_running_gradient += b_gradient.array().square().matrix();
            W->noalias() += learning_rate * (W_gradient.array()/W_running_gradient.array().sqrt()).matrix();
            b += learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt()).matrix();
        }

        // Dense Adadelta update: per-element step size is
        // RMS(previous updates)/RMS(gradients).
        template <typename DerivedIn, typename DerivedGOut>
        void computeGradientAdadelta(const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOut> &bProp_input,
            double learning_rate,
            double conditioning_constant,
            double decay)
        {
            Array<double,Dynamic,Dynamic> W_current_parameter_update;
            Array<double,Dynamic,1> b_current_parameter_update;
            W_gradient.setZero(W->rows(), W->cols());
            b_gradient.setZero(b.size());
            W_gradient.noalias() = bProp_input * predicted_embeddings.transpose();
            b_gradient.noalias() = bProp_input.rowwise().sum();
            // Decayed running averages of squared gradients.
            W_running_gradient = decay*W_running_gradient +
                (1.-decay)*W_gradient.array().square().matrix();
            b_running_gradient = decay*b_running_gradient+
                (1.-decay)*b_gradient.array().square().matrix();
            W_current_parameter_update = ((W_running_parameter_update.array()+conditioning_constant).sqrt()/
                (W_running_gradient.array()+conditioning_constant).sqrt())*
                W_gradient.array();
            b_current_parameter_update = ((b_running_parameter_update.array()+conditioning_constant).sqrt()/
                (b_running_gradient.array()+conditioning_constant).sqrt())*
                b_gradient.array();
            // Decayed running averages of squared updates.
            W_running_parameter_update = decay*W_running_parameter_update +
                (1.-decay)*W_current_parameter_update.square().matrix();
            b_running_parameter_update = decay*b_running_parameter_update +
                (1.-decay)*b_current_parameter_update.square().matrix();
            *W += learning_rate*W_current_parameter_update.matrix();
            b += learning_rate*b_current_parameter_update.matrix();
        }

        // Sparse backward: only the sampled rows of W contribute.
        template <typename DerivedGOutI, typename DerivedGOutV, typename DerivedGIn>
        void bProp(const MatrixBase<DerivedGOutI> &samples,
            const MatrixBase<DerivedGOutV> &weights,
            const MatrixBase<DerivedGIn> &bProp_matrix) const
        {
            UNCONST(DerivedGIn, bProp_matrix, my_bProp_matrix);
            my_bProp_matrix.setZero();
            uscgemm(1.0,
                W->transpose(),
                USCMatrix<double>(W->rows(), samples, weights),
                my_bProp_matrix.leftCols(samples.cols())); // narrow bProp_matrix for possible short minibatch
        }

        // Sparse SGD update: materialize the gradient for the sampled rows,
        // then update only those rows (with clipping) and reset their gradient.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
        void computeGradient(const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOutI> &samples,
            const MatrixBase<DerivedGOutV> &weights,
            double learning_rate, double momentum) // momentum is currently unused here
        {
            // To implement update clipping we must compute the gradient first.
            USCMatrix<double> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                W_gradient);
            uscgemv(1.0,
                gradient_output,
                Matrix<double,Dynamic,1>::Ones(weights.cols()),
                b_gradient);

            // Collect the set of rows that actually received gradient.
            int_map update_map;
            for (int sample_id=0; sample_id<samples.rows(); sample_id++)
                for (int train_id=0; train_id<samples.cols(); train_id++)
                    update_map[samples(sample_id, train_id)] = 1;
            // Convert to std::vector for parallelization.
            std::vector<int> update_items;
            for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
                update_items.push_back(it->first);
            int num_items = update_items.size();

            for (int item_id=0; item_id<num_items; item_id++)
            {
                int update_item = update_items[item_id];
                // UPDATE CLIPPING: row updates clipped to [-5,5], bias to [-0.5,0.5].
                W->row(update_item) += (learning_rate * W_gradient.row(update_item)).array().unaryExpr(Clipper()).matrix();
                double update = learning_rate * b_gradient(update_item);
                b(update_item) += std::min(0.5, std::max(update,-0.5));
                W_gradient.row(update_item).setZero();
                b_gradient(update_item) = 0.;
            }
        }

        // Sparse Adagrad update over only the sampled rows.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
        void computeGradientAdagrad(const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOutI> &samples,
            const MatrixBase<DerivedGOutV> &weights,
            double learning_rate)
        {
            USCMatrix<double> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                W_gradient);
            uscgemv(1.0,
                gradient_output,
                Matrix<double,Dynamic,1>::Ones(weights.cols()),
                b_gradient);

            int_map update_map; // stores all the parameters that have been updated
            for (int sample_id=0; sample_id<samples.rows(); sample_id++)
                for (int train_id=0; train_id<samples.cols(); train_id++)
                    update_map[samples(sample_id, train_id)] = 1;
            // Convert to std::vector for parallelization.
            std::vector<int> update_items;
            for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
                update_items.push_back(it->first);
            int num_items = update_items.size();

            for (int item_id=0; item_id<num_items; item_id++)
            {
                int update_item = update_items[item_id];
                W_running_gradient.row(update_item) += W_gradient.row(update_item).array().square().matrix();
                b_running_gradient(update_item) += b_gradient(update_item) * b_gradient(update_item);
                W->row(update_item) += learning_rate * (W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()).matrix();
                b(update_item) += learning_rate * b_gradient(update_item) / sqrt(b_running_gradient(update_item));
                W_gradient.row(update_item).setZero();
                b_gradient(update_item) = 0.;
            }
        }

        // Sparse Adadelta update over only the sampled rows.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
        void computeGradientAdadelta(const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOutI> &samples,
            const MatrixBase<DerivedGOutV> &weights,
            double learning_rate,
            double conditioning_constant,
            double decay)
        {
            USCMatrix<double> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                W_gradient);
            uscgemv(1.0,
                gradient_output,
                Matrix<double,Dynamic,1>::Ones(weights.cols()),
                b_gradient);

            int_map update_map; // stores all the parameters that have been updated
            for (int sample_id=0; sample_id<samples.rows(); sample_id++)
                for (int train_id=0; train_id<samples.cols(); train_id++)
                    update_map[samples(sample_id, train_id)] = 1;
            // Convert to std::vector for parallelization.
            std::vector<int> update_items;
            for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
                update_items.push_back(it->first);
            int num_items = update_items.size();

            #pragma omp parallel for
            for (int item_id=0; item_id<num_items; item_id++)
            {
                Array<double,1,Dynamic> W_current_parameter_update;
                double b_current_parameter_update;
                int update_item = update_items[item_id];
                // Decayed running averages of squared gradients.
                W_running_gradient.row(update_item) = decay*W_running_gradient.row(update_item)+
                    (1.-decay)*W_gradient.row(update_item).array().square().matrix();
                b_running_gradient(update_item) = decay*b_running_gradient(update_item)+
                    (1.-decay)*b_gradient(update_item)*b_gradient(update_item);
                // Step: RMS(prev updates)/RMS(gradients) * gradient.
                W_current_parameter_update = ((W_running_parameter_update.row(update_item).array()+conditioning_constant).sqrt()/
                    (W_running_gradient.row(update_item).array()+conditioning_constant).sqrt())*
                    W_gradient.row(update_item).array();
                b_current_parameter_update = (sqrt(b_running_parameter_update(update_item)+conditioning_constant)/
                    sqrt(b_running_gradient(update_item)+conditioning_constant))*
                    b_gradient(update_item);
                // Decayed running averages of squared updates.
                W_running_parameter_update.row(update_item) = decay*W_running_parameter_update.row(update_item)+
                    (1.-decay)*(W_current_parameter_update.square().matrix());
                b_running_parameter_update(update_item) = decay*b_running_parameter_update(update_item)+
                    (1.-decay)*b_current_parameter_update*b_current_parameter_update;
                W->row(update_item) += learning_rate*W_current_parameter_update.matrix();
                b(update_item) += learning_rate*b_current_parameter_update;
                W_gradient.row(update_item).setZero();
                b_gradient(update_item) = 0.;
            }
        }

        // Compute the raw sparse gradients only (for gradient checking);
        // does not modify the stored parameters.
        template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV, typename DerivedGW, typename DerivedGb>
        void computeGradientCheck(const MatrixBase<DerivedIn> &predicted_embeddings,
            const MatrixBase<DerivedGOutI> &samples,
            const MatrixBase<DerivedGOutV> &weights,
            const MatrixBase<DerivedGW> &gradient_W,
            const MatrixBase<DerivedGb> &gradient_b) const
        {
            UNCONST(DerivedGW, gradient_W, my_gradient_W);
            UNCONST(DerivedGb, gradient_b, my_gradient_b);
            my_gradient_W.setZero();
            my_gradient_b.setZero();
            USCMatrix<double> gradient_output(W->rows(), samples, weights);
            uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                my_gradient_W);
            uscgemv(1.0, gradient_output,
                Matrix<double,Dynamic,1>::Ones(weights.cols()), my_gradient_b);
        }
};
class Input_word_embeddings
{
private:
Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> *W;
int context_size, vocab_size;
Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> W_running_gradient;
Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> W_running_parameter_update;
Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> W_gradient;
friend class model;
public:
Input_word_embeddings() : context_size(0), vocab_size(0) { }
Input_word_embeddings(int rows, int cols, int context) { resize(rows, cols, context); }
void set_W(Matrix<double,Dynamic,Dynamic,Eigen::RowMajor> *input_W) {
W = input_W;
}
void resize(int rows, int cols, int context)
{
context_size = context;
vocab_size = rows;
W->setZero(rows, cols);
}
void read(std::ifstream &W_file) { readMatrix(W_file, *W); }
void write(std::ofstream &W_file) { writeMatrix(*W, W_file); }
template <typename Engine>
void initialize(Engine &engine,
bool init_normal,
double init_range,
string ¶meter_update,
double adagrad_epsilon)
{
W_gradient.setZero(W->rows(),W->cols());
if (parameter_update == "ADA") {
W_running_gradient = Matrix<double,Dynamic,Dynamic>::Ones(W->rows(),W->cols())*adagrad_epsilon;
//W_gradient.setZero(W->rows(),W->cols());
}
if (parameter_update == "ADAD") {
W_running_gradient.setZero(W->rows(),W->cols());
//W_gradient.setZero(W->rows(),W->cols());
W_running_parameter_update.setZero(W->rows(),W->cols());
}
initMatrix(engine,
*W,
init_normal,
init_range);
}
int n_inputs() const { return -1; }
int n_outputs() const { return W->cols() * context_size; }
// set output_id's embedding to the weighted average of all embeddings
template <typename Dist>
void average(const Dist &dist, int output_id)
{
W->row(output_id).setZero();
for (int i=0; i < W->rows(); i++)
if (i != output_id)
W->row(output_id) += dist.prob(i) * W->row(i);
}
template <typename DerivedIn, typename DerivedOut>
void fProp(const MatrixBase<DerivedIn> &input,
const MatrixBase<DerivedOut> &output) const
{
int embedding_dimension = W->cols();
// W is vocab_size x embedding_dimension
// input is ngram_size*vocab_size x minibatch_size
// output is ngram_size*embedding_dimension x minibatch_size
/*
// Dense version:
for (int ngram=0; ngram<context_size; ngram++)
output.middleRows(ngram*embedding_dimension, embedding_dimension) = W.transpose() * input.middleRows(ngram*vocab_size, vocab_size);
*/
UNCONST(DerivedOut, output, my_output);
my_output.setZero();
for (int ngram=0; ngram<context_size; ngram++)
{
// input might be narrower than expected due to a short minibatch,
// so narrow output to match
uscgemm(1.0,
W->transpose(),
USCMatrix<double>(W->rows(),input.middleRows(ngram, 1),Matrix<double,1,Dynamic>::Ones(input.cols())),
my_output.block(ngram*embedding_dimension, 0, embedding_dimension, input.cols()));
}
}
// When model is premultiplied, this layer doesn't get used,
// but this method is used to get the input into a sparse matrix.
// Hopefully this can get eliminated someday
template <typename DerivedIn, typename ScalarOut>
void munge(const MatrixBase<DerivedIn> &input, USCMatrix<ScalarOut> &output) const
{
output.resize(vocab_size*context_size, context_size, input.cols());
for (int i=0; i < context_size; i++)
output.indexes.row(i).array() = input.row(i).array() + i*vocab_size;
output.values.fill(1.0);
}
template <typename DerivedGOut, typename DerivedIn>
void computeGradient(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &input_words,
double learning_rate, double momentum, double L2_reg)
{
int embedding_dimension = W->cols();
// W is vocab_size x embedding_dimension
// input is ngram_size*vocab_size x minibatch_size
// bProp_input is ngram_size*embedding_dimension x minibatch_size
/*
// Dense version:
for (int ngram=0; ngram<context_size; ngram++)
W += learning_rate * input_words.middleRows(ngram*vocab_size, vocab_size) * bProp_input.middleRows(ngram*embedding_dimension, embedding_dimension).transpose()
*/
/*
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(learning_rate,
USCMatrix<double>(W->rows(), input_words.middleRows(ngram, 1), Matrix<double,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension,0,embedding_dimension,input_words.cols()).transpose(),
*W);
}
*/
//IF WE WANT TO DO GRADIENT CLIPPING, THEN WE FIRST COMPUTE THE GRADIENT AND THEN
//PERFORM CLIPPING WHILE UPDATING
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(1.0,
USCMatrix<double>(W->rows(),input_words.middleRows(ngram, 1),Matrix<double,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
W_gradient);
}
int_map update_map; //stores all the parameters that have been updated
for (int ngram=0; ngram<context_size; ngram++)
{
for (int train_id=0; train_id<input_words.cols(); train_id++)
{
update_map[input_words(ngram,train_id)] = 1;
}
}
// Convert to std::vector for parallelization
std::vector<int> update_items;
for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
{
update_items.push_back(it->first);
}
int num_items = update_items.size();
#pragma omp parallel for
for (int item_id=0; item_id<num_items; item_id++)
{
int update_item = update_items[item_id];
//UPDATE CLIPPING
W->row(update_item) += (learning_rate*
W_gradient.row(update_item).array().unaryExpr(Clipper())).matrix();
//GRADIENT CLIPPING
//W->row(update_item) += learning_rate*
// W_gradient.row(update_item).array().unaryExpr(Clipper()).matrix();
//SETTING THE GRADIENT TO ZERO
W_gradient.row(update_item).setZero();
}
}
template <typename DerivedGOut, typename DerivedIn>
void computeGradientAdagrad(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &input_words,
double learning_rate,
double L2_reg)
{
int embedding_dimension = W->cols();
//W_gradient.setZero(W->rows(), W->cols());
/*
if (W_running_gradient.rows() != W->rows() || W_running_gradient.cols() != W->cols())
W_running_gradient = Ones(W->rows(), W->cols())*adagrad_epsilon;
*/
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(1.0,
USCMatrix<double>(W->rows(),input_words.middleRows(ngram, 1),Matrix<double,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
W_gradient);
}
int_map update_map; //stores all the parameters that have been updated
for (int ngram=0; ngram<context_size; ngram++)
{
for (int train_id=0; train_id<input_words.cols(); train_id++)
{
update_map[input_words(ngram,train_id)] = 1;
}
}
// Convert to std::vector for parallelization
std::vector<int> update_items;
for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
{
update_items.push_back(it->first);
}
int num_items = update_items.size();
#pragma omp parallel for
for (int item_id=0; item_id<num_items; item_id++)
{
int update_item = update_items[item_id];
W_running_gradient.row(update_item) += W_gradient.row(update_item).array().square().matrix();
W->row(update_item) += learning_rate *
(W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()).matrix();
/*
//UPDATE CLIPPING
W->row(update_item) += (learning_rate *
(W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()))
.unaryExpr(Clipper()).matrix();
*/
W_gradient.row(update_item).setZero();
}
}
// ADADELTA update (Zeiler 2012) for the input word-embedding table W.
// First scatter-accumulates the minibatch gradient of the touched embedding
// rows into W_gradient (sparse gemm per context position), then updates only
// those rows, maintaining per-row running averages of squared gradients
// (W_running_gradient) and squared parameter updates (W_running_parameter_update).
// NOTE(review): learning_rate scales the final step (textbook ADADELTA uses
// 1.0) and L2_reg is accepted but unused in this body -- confirm intended.
template <typename DerivedGOut, typename DerivedIn>
void computeGradientAdadelta(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &input_words,
double learning_rate,
double L2_reg,
double conditioning_constant,
double decay)
{
int embedding_dimension = W->cols();
/*
if (W_running_gradient.rows() != W->rows() || W_running_gradient.cols() != W->cols())
W_running_gradient = Ones(W->rows(), W->cols())*adagrad_epsilon;
*/
// For each context position, add the transposed error slice into the rows
// of W_gradient selected by that position's word ids (sparse scatter-gemm).
for (int ngram=0; ngram<context_size; ngram++)
{
uscgemm(1.0,
USCMatrix<double>(W->rows(),input_words.middleRows(ngram, 1),Matrix<double,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
W_gradient);
}
int_map update_map; //stores all the parameters that have been updated
for (int ngram=0; ngram<context_size; ngram++)
{
for (int train_id=0; train_id<input_words.cols(); train_id++)
{
update_map[input_words(ngram,train_id)] = 1;
}
}
// Convert to std::vector for parallelization
std::vector<int> update_items;
for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
{
update_items.push_back(it->first);
}
int num_items = update_items.size();
// Each iteration touches a distinct row, so the loop is safely parallel.
#pragma omp parallel for
for (int item_id=0; item_id<num_items; item_id++)
{
Array<double,1,Dynamic> W_current_parameter_update;
int update_item = update_items[item_id];
// E[g^2] <- decay*E[g^2] + (1-decay)*g^2
W_running_gradient.row(update_item) = decay*W_running_gradient.row(update_item)+
(1.-decay)*W_gradient.row(update_item).array().square().matrix();
// step = g * RMS(previous updates) / RMS(gradients)
W_current_parameter_update = ((W_running_parameter_update.row(update_item).array()+conditioning_constant).sqrt()/
(W_running_gradient.row(update_item).array()+conditioning_constant).sqrt())*
W_gradient.row(update_item).array();
// E[dx^2] <- decay*E[dx^2] + (1-decay)*dx^2
W_running_parameter_update.row(update_item) = decay*W_running_parameter_update.row(update_item)+
(1.-decay)*W_current_parameter_update.square().matrix();
W->row(update_item) += learning_rate*W_current_parameter_update.matrix();
// Clear the accumulated gradient for the next minibatch.
W_gradient.row(update_item).setZero();
}
}
// Compute the dense embedding gradient for numerical gradient checking.
// Writes into the caller-supplied matrix (const_cast via UNCONST) the
// gradient of W implied by bProp_input/input_words without updating W.
// x and minibatch_size are unused here (kept for interface symmetry).
template <typename DerivedGOut, typename DerivedIn, typename DerivedGW>
void computeGradientCheck(const MatrixBase<DerivedGOut> &bProp_input,
const MatrixBase<DerivedIn> &input_words,
int x, int minibatch_size,
const MatrixBase<DerivedGW> &gradient) const //not sure if we want to use momentum here
{
UNCONST(DerivedGW, gradient, my_gradient);
int embedding_dimension = W->cols();
my_gradient.setZero();
// Same sparse scatter-accumulate used by the training-time gradient paths.
for (int ngram=0; ngram<context_size; ngram++)
uscgemm(1.0,
USCMatrix<double>(W->rows(),input_words.middleRows(ngram, 1),Matrix<double,1,Dynamic>::Ones(input_words.cols())),
bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
my_gradient);
}
};
} // namespace nplm
|
symgs.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// One level smooth: 2*NUM_SMOOTHS Gauss-Seidel sweeps over every box this
// rank owns, alternating forward (even s) and backward (odd s) lexicographic
// order. phi_id/rhs_id select the solution and RHS vectors; a, b and the
// alpha/beta_*/Dinv coefficient arrays feed the apply_op_ijk stencil macro.
void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){
  int s;
  for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps (forward/backward) per GS smooth
    // refresh ghost zones and re-impose boundary conditions before each sweep
    exchange_boundary(level,phi_id,stencil_is_star_shaped());
    apply_BCs(level,phi_id);
    uint64_t _timeStart = CycleTime();
    // BUG FIX(review): the lambdas previously used empty capture lists []
    // while referencing level, phi_id, rhs_id and s from the enclosing
    // scope, which does not compile; capture by reference instead.
    // hclib::finish blocks until every forasync task completes, so the
    // captured locals outlive all tasks.
    hclib::finish([&] {
      hclib::loop_domain_1d loop(level->num_my_boxes);
      hclib::forasync(&loop, [&] (int box) {
        int i,j,k;
        const int ghosts  = level->box_ghosts;
        const int jStride = level->my_boxes[box].jStride;
        const int kStride = level->my_boxes[box].kStride;
        const int dim     = level->my_boxes[box].dim;
        const double h2inv = 1.0/(level->h*level->h);
        // offset past the ghost zone so [0] is the first interior point
        double * __restrict__ phi          = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride);
        const double * __restrict__ rhs    = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);
        const double * __restrict__ alpha  = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
        const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
        const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
        const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
        const double * __restrict__ Dinv   = level->my_boxes[box].vectors[VECTOR_DINV  ] + ghosts*(1+jStride+kStride);
        const double * __restrict__ valid  = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
        if( (s&0x1)==0 ){ // forward sweep... hard to thread
          for(k=0;k<dim;k++){
          for(j=0;j<dim;j++){
          for(i=0;i<dim;i++){
            int ijk = i + j*jStride + k*kStride;
            double Ax = apply_op_ijk(phi);
            phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax); // D^-1-preconditioned GS update
          }}}
        }else{ // backward sweep... hard to thread
          for(k=dim-1;k>=0;k--){
          for(j=dim-1;j>=0;j--){
          for(i=dim-1;i>=0;i--){
            int ijk = i + j*jStride + k*kStride;
            double Ax = apply_op_ijk(phi);
            phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
          }}}
        }
      });
    });
    level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
GB_unaryop__lnot_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_uint16
// op(A') function: GB_tran__lnot_bool_uint16
// C type: bool
// A type: uint16_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) !(Ax [p]) for p in [0, anz): cast uint16_t to bool, then
// logical NOT (see the GB_CAST_OP macro above). Parallelized with a static
// OpenMP schedule over nthreads threads.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_bool_uint16
(
bool *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = ! (bool) Ax [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A while typecasting uint16_t -> bool and
// applying logical NOT. The loop body lives in GB_unaryop_transpose.c and is
// parameterized by the GB_* macros defined above (this is phase 2 of 2).
GrB_Info GB_tran__lnot_bool_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fig4.36-lastprivate-alternative.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
/*
 * Demonstrates an alternative to OpenMP's lastprivate clause: the thread
 * that executes the final loop iteration copies its private value into a
 * shared variable, making it available after the parallel region.
 */
int main()
{
    int i;        /* loop index, private per thread */
    int a;        /* per-thread work value */
    int a_shared; /* receives the value computed by the last iteration */
    int n = 5;

#ifdef _OPENMP
    /* Pin the team size so the demo output is reproducible. */
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    (void) omp_set_num_threads(3);
#endif
#pragma omp parallel for private(i) private(a) shared(a_shared)
    for (i = 0; i < n; i++) {
        a = i + 1;
        printf("Thread %d has a value of a = %d for i = %d\n",
               omp_get_thread_num(), a, i);
        /* Only the final iteration publishes its result. */
        if (i == n - 1) {
            a_shared = a;
        }
    } /*-- End of parallel for --*/
    printf("Value of a after parallel for: a_shared = %d\n", a_shared);
    return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Y is normalized in place so the microsecond subtraction cannot
 * underflow; callers must not rely on Y's contents afterwards.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y until x->tv_usec - y->tv_usec >= 0. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry any excess microseconds (over one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver: order-2, 3-D 25-point stencil with diamond time tiling
 * (CLooG-generated loop nest). Usage: prog Nx Ny Nz [Nt].
 * Reports per-test and best wall time over TESTS runs.
 *
 * Fixes relative to the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few arguments were
 *    given (undefined behavior); sensible defaults are now provided.
 *  - roc2 was assigned a malloc'd stub that was immediately leaked.
 *  - The A[1] time plane was never initialized although the update reads
 *    it (leapfrog scheme); it is now zeroed.
 *  - A and tile_size were never freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults (interior 96^3 plus 2*4 ghost layers, 10 time steps). */
  int Nx = 104, Ny = 104, Nz = 104, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[2][Nz][Ny][Nx]: two leapfrog time planes; roc2[Nz][Ny][Nx]: the
     velocity coefficient. Built as pointer trees, freed in reverse below. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;  /* the original malloc'd a stub here and leaked it */
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  // initialize the interior (the loop bounds below never index layer 0);
  // A[1] must be initialized too since the update reads the previous plane
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients (center plus four shells per axis). */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=2*Nt-2;t1++) {
        lbp=ceild(t1+2,2);
        ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
            for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(24*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(24*t3+Nx+11,128));t4++) {
              for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                    lbv=max(128*t4,4*t5+4);
                    ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (reverse order of allocation)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
morn_list.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_ptc.h"
// Private state behind an MList: the exported pointer array plus an
// optional pooled storage area for element payloads.
struct HandleListCreate
{
MList *list;
int num;          // capacity of the data array (list->num is the used count)
void **data;      // backing pointer array exported through list->data
MMemory *memory;  // lazily-created payload pool (mListWrite/mListPlace)
int defrag_size;  // bytes orphaned by overwrites since the last defrag
int read_order;   // cursor used by mListRead when n is negative
};
// Handle destructor: releases the payload pool, the pointer array and the
// list shell itself.
void endListCreate(void *info)
{
struct HandleListCreate *handle = (struct HandleListCreate *)info;
mException((handle->list == NULL),EXIT,"invalid list");
if(handle->memory !=NULL) mMemoryRelease(handle->memory);
if(handle->data != NULL) mFree(handle->data);
mFree(handle->list);
}
#define HASH_ListCreate 0xfa6c59f
// Create a list with num pointer slots. If data is a valid pointer the
// slots are copied from it, otherwise they are zeroed. All private state
// lives in a HandleListCreate registered as handle 0 of the list.
MList *ListCreate(int num,void **data)
{
MList *list = (MList *)mMalloc(sizeof(MList));
memset(list,0,sizeof(MList));
list->handle=mHandleCreate();
MHandle *hdl=mHandle(list,ListCreate);
struct HandleListCreate *handle = (struct HandleListCreate *)(hdl->handle);
handle->list = list;
if(num<0) num = 0;  // negative counts are treated as an empty list
handle->num = num;
list->num = num;
if(num>0)
{
handle->data = (void **)mMalloc(num*sizeof(void *));
if(!INVALID_POINTER(data)) memcpy(handle->data,data,num*sizeof(void *));
else memset(handle->data, 0,num*sizeof(void *));
}
else
// with zero slots a non-NULL initializer makes no sense
mException((!INVALID_POINTER(data)),EXIT,"invalid input");
list->data = handle->data;
return list;
}
// Release a list created by ListCreate. All cleanup is delegated to the
// handle chain (endListCreate frees the shell), so nothing else is freed here.
void mListRelease(MList *list)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input source list");
    if(INVALID_POINTER(list->handle)) return;
    mHandleRelease(list->handle);
}
// Grow the list to hold n elements (n<0 means "one more"). Capacity grows
// geometrically (at least 128 slots, or half the current size, at a time)
// so repeated appends are amortized O(1); existing pointers are preserved
// and new slots are zeroed.
void mListAppend(MList *list,int n)
{
mException(INVALID_POINTER(list),EXIT,"invalid input source list");
if(n<0) n=list->num+1;
else mException(n<list->num,EXIT,"invalid list append number");
struct HandleListCreate *handle= (struct HandleListCreate *)(((MHandle *)(list->handle->data[0]))->handle);
if(n<=handle->num)
{
// capacity suffices: resync list->data with the backing array if the
// caller had re-pointed it elsewhere, then just bump the used count
if((list->data!= handle->data)&&(list->num>0))
memcpy(handle->data,list->data,list->num*sizeof(void *));
list->data = handle->data;
list->num = n;
return;
}
int num = list->num + MAX(MAX(128,n-list->num),(list->num)>>1);
void **list_data = (void **)mMalloc(num*sizeof(void *));
if(list->num>0)
memcpy(list_data,list->data,(list->num)*sizeof(void *));
memset(list_data+list->num,0,(num-list->num)*sizeof(void *));
if(handle->data != NULL) mFree(handle->data);
handle->data = list_data;
handle->num = num;
list->data = handle->data;
list->num = n;
}
// Append num elements of `size` bytes each, allocating their storage from
// the list's internal pool and (optionally) copying initial contents from
// the packed buffer data; data==NULL leaves the new elements unwritten.
void mListPlace(MList *list,void *data,int num,int size)
{
if(num<=0) return;
mException((size<=0),EXIT,"invalid input list element size");
int list_num = list->num;
mListAppend(list,list_num+num);
struct HandleListCreate *handle = (struct HandleListCreate *)(((MHandle *)(list->handle->data[0]))->handle);
void **idx = list->data+list_num;
if(handle->memory == NULL) handle->memory = mMemoryCreate(1,size*num,MORN_HOST);
else mMemoryAppend(handle->memory,size*num);
// carve num blocks of `size` bytes out of the pool into the new slots
mMemoryIndex(handle->memory,num,size,&idx,1);
if(data==NULL) return;
char *p=(char *)data;
for(int i=0;i<num;i++) {memcpy(list->data[list_num+i],p,size);p+=size;}
}
// void mListOperate(MList *list,void (*func)(void *,void *),void *para)
// {
// for(int i=0;i<list->num;i++) func(list->data[i],para);
// }
// struct HandleListWrite
// {
// int defrag_size;
// };
// void endListWrite(void *info) {}
// #define HASH_ListWrite 0x40aea976
// Store a copy of data (size bytes; size<0 means NUL-terminated string,
// data may be NULL to just reserve storage) at slot n; n<0 appends, n may
// also equal list->num to append explicitly. Returns the stored element
// pointer. Overwriting an occupied slot orphans its old bytes inside the
// pool; once more than 16KB is orphaned the pool is defragmented.
void *mListWrite(MList *list,int n,void *data,int size)
{
mException(INVALID_POINTER(list),EXIT,"invalid input source list");
mException((n>list->num),EXIT,"invalid write location %d(with list->num is %d)",n,list->num);
if(size<0)
{
mException((INVALID_POINTER(data)),EXIT,"invalid data to write,which is %p",data);
size = strlen((char *)data)+1;
}
struct HandleListCreate *handle0 = (struct HandleListCreate *)(((MHandle *)(list->handle->data[0]))->handle);
if(n<0) n = list->num;
if(handle0->memory == NULL) handle0->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
void *ptr = mMemoryWrite(handle0->memory,data,size);
// fresh slot when appending or when the target slot is still empty
int flag = (n==list->num); if(!flag) flag=(list->data[n]==NULL);
if(flag)
{
if(n<handle0->num) list->num = n+1;
else mListAppend(list,DFLT);
list->data[n] = ptr;
}
else
{
// slot replaced: the old payload becomes garbage inside the pool
list->data[n] = ptr;
handle0->defrag_size += size;
if(handle0->defrag_size>16384)
{
mListElementOperate(list,MemoryCollect,handle0->memory);
MemoryDefrag(handle0->memory);
handle0->defrag_size=0;
}
}
return list->data[n];
}
// struct HandleListRead
// {
// int read_order;
// };
// void endListRead(void *info) {}
// #define HASH_ListRead 0x537cc305
// Read slot n (n<0 continues from the internal cursor; the cursor always
// advances to n+1). Returns the element pointer, or NULL past the end.
// When data is non-NULL the element is also copied out: memcpy for
// size>0, strcpy otherwise.
void *mListRead(MList *list,int n,void *data,int size)
{
mException(INVALID_POINTER(list),EXIT,"invalid input");
struct HandleListCreate *handle0 = (struct HandleListCreate *)(((MHandle *)(list->handle->data[0]))->handle);
if(n<0) n = handle0->read_order;
handle0->read_order = n+1;
if(n>=list->num) return NULL;
if(data!=NULL)
{
if(size>0) memcpy( data, list->data[n],size);
else strcpy((char *)data,(char *)list->data[n]);
}
return list->data[n];
}
// Logically empty the list; pooled element storage is recycled, not freed.
void mListClear(MList *list)
{
    struct HandleListCreate *state = (struct HandleListCreate *)(((MHandle *)(list->handle->data[0]))->handle);
    list->num = 0;
    if(state->memory != NULL) mMemoryClear(state->memory);
}
// Shuffle the element pointers in place by swapping each slot with a
// randomly chosen one.
// NOTE(review): assumes mRand(0,n) yields indices in [0,n) -- confirm.
void mListReorder(MList *list)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input source list");
    void **item = list->data;
    int count = list->num;
    for(int idx=0; idx<count; idx++)
    {
        int pick = mRand(0,count);
        void *tmp  = item[idx];
        item[idx]  = item[pick];
        item[pick] = tmp;
    }
}
// Copy src's elements into dst. When src has no internal payload pool the
// element pointers are shared (shallow copy); otherwise the pooled payloads
// are duplicated into dst's pool so dst owns independent copies.
// BUG FIX(review): the destination pointer-array argument of mMemoryCopy
// was &(src->data), so dst->data was never fixed up to reference dst's
// pool; it must be &(dst->data).
void mListCopy(MList *src,MList *dst)
{
    mListAppend(dst,src->num);
    struct HandleListCreate *src_handle = (struct HandleListCreate *)(((MHandle *)(src->handle->data[0]))->handle);
    if(src_handle->memory == NULL)
    {
        // no pool: a shallow copy of the pointer array is sufficient
        memcpy(dst->data,src->data,src->num*sizeof(void *));
        return;
    }
    struct HandleListCreate *dst_handle = (struct HandleListCreate *)(((MHandle *)(dst->handle->data[0]))->handle);
    if(dst_handle->memory == NULL)
        dst_handle->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
    mMemoryCopy(src_handle->memory,&(src->data),dst_handle->memory,&(dst->data),1,&(src->num));
}
// Concatenate list1 and list2 into dst (dst may alias either input). The
// payload pools of both sources are merged into dst's pool and then
// released; both source lists are left empty.
// NOTE(review): when dst aliases an input, the other input's data array is
// freed directly with mFree -- assumes it is a distinct allocation owned by
// that list; confirm against mListAppend's sharing behavior.
void mListMerge(MList *list1,MList *list2,MList *dst)
{
mListAppend(dst,list1->num+list2->num);
struct HandleListCreate *handle1 = (struct HandleListCreate *)(((MHandle *)(list1->handle->data[0]))->handle);
struct HandleListCreate *handle2 = (struct HandleListCreate *)(((MHandle *)(list2->handle->data[0]))->handle);
struct HandleListCreate *dst_handle =(struct HandleListCreate *)(((MHandle *)(dst->handle->data[0]))->handle);
int num1 = list1->num;
int num2 = list2->num;
if(dst==list1)
{
// dst already holds list1's pointers in the first num1 slots
memcpy(dst->data+num1,list2->data,num2*sizeof(void *));
mFree(list2->data);list2->data = NULL;list2->num = 0;
}
else if(dst==list2)
{
// dst already holds list2's pointers in the first num2 slots
memcpy(dst->data+num2,list1->data,num1*sizeof(void *));
mFree(list1->data);list1->data = NULL;list1->num = 0;
}
else
{
memcpy(dst->data ,list1->data,num1*sizeof(void *));
memcpy(dst->data+num1,list2->data,num2*sizeof(void *));
mFree(list1->data);list1->data = NULL;list1->num = 0;
mFree(list2->data);list2->data = NULL;list2->num = 0;
}
mMemoryMerge(handle1->memory,handle2->memory,dst_handle->memory);
mMemoryRelease(handle1->memory);handle1->memory = NULL;
mMemoryRelease(handle2->memory);handle2->memory = NULL;
}
// Remove element n by shifting the tail of the pointer array down one slot
// (order of the remaining elements is preserved).
void mListElementDelete(MList *list,int n)
{
    mException(INVALID_POINTER(list),EXIT,"invalid input");
    mException((n>=list->num),EXIT,"invalid input");
    int tail = list->num - n - 1;
    memmove(list->data+n, list->data+n+1, tail*sizeof(void *));
    list->num -= 1;
}
// Insert a copy of data (size bytes, mListWrite semantics) at position n:
// the element is first appended, then rotated back into place so the
// elements from n onward shift up by one.
void mListElementInsert(MList *list,int n,void *data,int size)
{
void *buff;
mListWrite(list,DFLT,data,size);
buff = list->data[list->num-1];  // the freshly appended element
memmove(list->data+n+1,list->data+n,(list->num-n-1)*sizeof(void *));
list->data[n] = buff;
}
// Apply func(element, para) to every element, in list order.
void mListElementOperate(MList *list,void *function,void *para)
{
    void (*op)(void *,void *) = function;
    mException(INVALID_POINTER(list)||(op==NULL),EXIT,"invalid input");
    for(int idx=0; idx<list->num; idx++)
        op(list->data[idx],para);
}
// Keep only elements for which func(element,para) returns nonzero,
// compacting the survivors to the front (stable filter).
void mListElementScreen(MList *list,void *function,void *para)
{
    int (*keep)(void *,void *) = function;
    mException(INVALID_POINTER(list)||(keep==NULL),EXIT,"invalid input");
    int kept = 0;
    for(int idx=0; idx<list->num; idx++)
    {
        if(!keep(list->data[idx],para)) continue;
        list->data[kept] = list->data[idx];
        kept += 1;
    }
    list->num = kept;
}
// Pairwise selection/deduplication. For each element i, the callback
// func(a,b,&flag1,&flag2,para) compares it against every later element j:
// clearing flag2 drops j, clearing flag1 drops i (and stops i's inner
// scan). Survivors are compacted to the front, preserving order.
void mListElementSelect(MList *list,void *function,void *para)
{
void (*func)(void *,void *,int *,int *,void *) = function;
mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
int n=0;
for(int i=0;i<list->num;i++)
{
if(list->data[i]==NULL)
continue;
int flag1=1;
for(int j=i+1;j<list->num;j++)
{
if(list->data[j] == NULL)
continue;
int flag2=1;
func(list->data[i],list->data[j],&flag1,&flag2,para);
if(flag2==0)
list->data[j]=NULL;  // j rejected: mark dead
if(flag1==0)
break;               // i rejected: stop scanning
}
if(flag1==1)
{
list->data[n]=list->data[i];
n=n+1;
}
}
list->num = n;
}
/*
void mListSelect(MList *list,void (*func)(void *,void *,int *,int *,void *),void *para)
{
mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
void **data = list->data;
int *flag=mMalloc((list->num+2)*sizeof(int));
flag=flag+1;
memset(flag,DFLT,list->num*sizeof(int));
flag[-1]=list->num; flag[list->num]=-1;
int flag1,flag2;
while(1)
{
int ok=1;
for(int i=flag[i];i<list->num;i++)
{
if(flag[i]<0) continue;
for(int j=flag[i]+1;j<list->num;j++)
{
if(j==i) continue;
if((flag[j]>=0)&&(flag[j]<list->num)) continue;
func(data[i],data[j],&flag1,&flag2,para);
if(flag1==0) {flag[i] = j;ok=0;break;}
if(flag2==0) {flag[j] = i;ok=0;continue;}
}
if(flag[i]>=0) continue;
flag[i]=list->num;
}
if(ok) break;
}
int n=0;
for(int i=0;i<list->num;i++) if(flag[i]==list->num) {list->data[n]=data[i];n++;}
list->num = n;
mFree(flag-1);
}
*/
// Cluster the elements: func(a,b,para)==1 means "same group". Transitive
// closure is achieved by merging labels whenever two differently-labeled
// elements match. group[] receives a dense 0-based cluster id per element;
// the number of clusters is returned.
// NOTE(review): memset(group,DFLT,...) assumes DFLT is -1 so the byte fill
// yields DFLT-valued ints -- confirm.
int mListCluster(MList *list,int *group,void *function,void *para)
{
int (*func)(void *,void *,void *) = function;
mException((INVALID_POINTER(list))||(group==NULL)||(func==NULL),EXIT,"invalid input");
char *valid = (char *)mMalloc(list->num * sizeof(char));
memset(valid,0 ,list->num*sizeof(char));
memset(group,DFLT,list->num*sizeof(int));
int i,j,k;
int n=0;
for(i=0;i<list->num;i++)
{
for(j=0;j<i;j++)
{
if(group[i]==group[j]) continue;
if(func(list->data[i],list->data[j],para)==1)// same cluster
{
if(group[i] == DFLT)
group[i] = group[j];
else
{
// i already labeled: fold j's whole group into i's
valid[group[j]] = 0;
int g = group[j];
for(k=0;k<i;k++)
if(group[k] == g) group[k] = group[i];
}
}
}
if(group[i] == DFLT)
{
// no match found: start a new provisional group
group[i] = n;
valid[n] = 1;
n = n+1;
}
}
// compress the surviving provisional labels into a dense range
int *c = (int *)mMalloc(n *sizeof(int));
int num = 0;
for(i=0;i<n;i++)
{
if(valid[i] != 0)
{c[i] = num;num +=1;}
}
mFree(valid);
for(i=0;i<list->num;i++)
group[i] = c[group[i]];
mFree(c);
return num;
}
// Scratch state for mListClassify, cached on the list handle so repeated
// classifications reuse the buffers and the output sheet.
struct HandleListClassify
{
int *group;    // per-element provisional group label
char *valid;   // which provisional labels survived merging
MSheet *sheet; // output sheet, one row per final cluster
int list_num;  // element count the buffers were sized for
};
// Handle destructor for the classify scratch state.
void endListClassify(struct HandleListClassify *handle)
{
if(handle->group!=NULL) mFree(handle->group);
if(handle->valid!=NULL) mFree(handle->valid);
if(handle->sheet!=NULL) mSheetRelease(handle->sheet);
}
#define HASH_ListClassify 0x24c19acf
// Cluster the list elements (same pairwise-predicate contract as
// mListCluster) and return an MSheet with one row per cluster, each row
// holding that cluster's element pointers. Scratch buffers and the sheet
// are cached on the list handle and reused across calls.
MSheet *mListClassify(MList *list,void *function,void *para)
{
int (*func)(void *,void *,void *) = function;
mException((INVALID_POINTER(list))||(func==NULL),EXIT,"invalid input");
MHandle *hdl = mHandle(list,ListClassify);
struct HandleListClassify *handle = (struct HandleListClassify *)(hdl->handle);
if((hdl->valid == 0)||(handle->list_num<list->num))
{
// (re)allocate scratch buffers on first use or when the list has grown
if(handle->list_num<list->num)
{
if(handle->group!=NULL) {mFree(handle->group);handle->group=NULL;}
if(handle->valid!=NULL) {mFree(handle->valid);handle->valid=NULL;}
}
if(handle->group==NULL) handle->group = (int *)mMalloc(list->num*sizeof(int ));
if(handle->valid==NULL) handle->valid = (char *)mMalloc(list->num*sizeof(char));
handle->list_num = list->num;
if(handle->sheet == NULL) handle->sheet = mSheetCreate();
hdl->valid = 1;
}
char *valid = handle->valid; int *group = handle->group;
memset(valid,0 ,list->num*sizeof(char));
// assumes DFLT is -1 so the byte fill yields DFLT ints (see mListCluster)
memset(group,DFLT,list->num*sizeof(int));
int i,j,k;
int n=0;
// label-merging clustering pass (same algorithm as mListCluster)
for(i=0;i<list->num;i++)
{
for(j=0;j<i;j++)
{
if(group[i]==group[j]) continue;
if(func(list->data[i],list->data[j],para)==1)
{
if(group[i] == DFLT)
group[i] = group[j];
else
{
valid[group[j]] = 0;
int g = group[j];
for(k=0;k<i;k++)
if(group[k] == g) group[k] = group[i];
}
}
}
if(group[i] == DFLT)
{
group[i] = n;
valid[n] = 1;
n = n+1;
}
}
// compress label space, then scatter the elements into the sheet rows
int *c = (int *)mMalloc(n *sizeof(int));
int num = 0;
for(i=0;i<n;i++)
{
if(valid[i] != 0)
{c[i] = num;num +=1;}
}
MSheet *sheet = handle->sheet;
mSheetClear(sheet);
mSheetRowAppend(sheet,num);
for(i=0;i<list->num;i++)
{
int g = c[group[i]];
int n = sheet->col[g];
mSheetColAppend(sheet,g,n+1);
sheet->data[g][n]=list->data[i];
}
mFree(c);
return sheet;
}
// Recursive quicksort over a pointer array using a hole-based partition.
// func(a,b,para) returns <0/0/>0, strcmp-style. Pivot selection is a
// median-of-three of slots 0, 1 and n-1: after the endpoint swap, the slot
// that held the median becomes the "hole" and buff carries the pivot value
// until it is dropped back in at the partition point.
void _ListSort(void **list_data,int n,int (*func)(void *,void *,void *),void *para)
{
void *buff;
// order the endpoints so list_data[0] <= list_data[n-1]
if(func(list_data[n-1],list_data[0],para)<0) {buff=list_data[n-1];list_data[n-1]=list_data[0];list_data[0]=buff;}
if(n==2) return;
// move the median of {0,1,n-1} into buff, leaving a hole at slot 1
if(func(list_data[ 1],list_data[0],para)<0) {buff=list_data[ 0];list_data[ 0]=list_data[1];}
else if(func(list_data[n-1],list_data[1],para)<0) {buff=list_data[n-1];list_data[n-1]=list_data[1];}
else buff=list_data[ 1];
if(n==3) {list_data[1]=buff;return;}
int i=1;int j=n-2;
// partition: fill the hole alternately from the right and left ends
while(1)
{
while(func(list_data[j],buff,para)>=0) {j=j-1;if(j==i) goto ListSort_next;}
list_data[i] = list_data[j]; i=i+1;if(i==j) goto ListSort_next;
while(func(list_data[i],buff,para)<=0) {i=i+1;if(i==j) goto ListSort_next;}
list_data[j] = list_data[i]; j=j-1;if(i==j) goto ListSort_next;
}
ListSort_next:
list_data[i]=buff;  // drop the pivot into the final hole position
if( i >1) _ListSort(list_data , i ,func,para);
if(n-i-1>1) _ListSort(list_data+i+1,n-i-1,func,para);
}
// Sort the list in place with the three-way comparator func(a,b,para).
void mListSort(MList *list,void *function,void *para)
{
    int (*cmp)(void *,void *,void *) = function;
    mException((INVALID_POINTER(list))||(cmp==NULL),EXIT,"invalid input");
    if(list->num > 1)
        _ListSort(list->data,list->num,cmp,para);
}
// LIFO stack built on an MList. `order` is the index of the current top
// element (-1 when empty), adjusted with atomic add/sub.
struct HandleStack
{
volatile int order;
};
// Nothing beyond the struct itself to release.
void endStack(void *info) {}
#define HASH_Stack 0x8c4d4c73
/* Push 'data' ('size' bytes) onto the stack layered over 'stack'.
 * Returns the stored element pointer, or NULL when the stack is full
 * (top already at stack->num-1). Storage/copying is delegated to
 * mListWrite; size<=0 presumably means NUL-terminated string, matching
 * mStackRead -- TODO confirm against mListWrite.
 * First use initializes the stack to empty (order = -1).
 * NOTE(review): the capacity test and the following mAtomicAdd are not
 * one atomic step, and handle->order is re-read non-atomically for the
 * mListWrite index -- concurrent pushes can race; confirm whether
 * single-producer use is intended. */
void *mStackWrite(MList *stack,void *data,int size)
{
mException(INVALID_POINTER(stack),EXIT,"invalid stack");
MHandle *hdl=mHandle(stack,Stack);
struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
/* first use: mark the stack empty before the first push */
if(hdl->valid == 0) handle->order = -1;
hdl->valid = 1;
if(handle->order==stack->num-1) return NULL;
mAtomicAdd(&(handle->order),1);
return mListWrite(stack,handle->order,data,size);
}
/* Pop the top element from the stack. Returns the element pointer, or
 * NULL when the stack was never used or is already empty. When 'data'
 * is non-NULL the element is also copied out: memcpy of 'size' bytes,
 * or strcpy when size<=0 (element assumed to be a NUL-terminated
 * string).
 * NOTE(review): assumes mAtomicSub yields the index of the popped slot
 * (i.e. pre-decrement top) -- TODO confirm the mAtomicSub return
 * convention. The slot is read after the decrement, so a concurrent
 * push may overwrite it before the copy; confirm single-consumer use. */
void *mStackRead(MList *stack,void *data,int size)
{
mException(INVALID_POINTER(stack),EXIT,"invalid stack");
MHandle *hdl=mHandle(stack,Stack);
struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
if(hdl->valid == 0) return NULL;
if(handle->order <0) return NULL;
int order = mAtomicSub(&(handle->order),1);
void *p=stack->data[order];
if(data!=NULL)
{
if(size<=0) strcpy((char *)data,(char *)p);
else memcpy(data,p,size);
}
return p;
}
/* Return the number of elements currently on the stack (top index + 1).
 * NOTE(review): querying a never-used stack initializes it as a side
 * effect (order = -1, valid = 1) so that size 0 is reported
 * consistently on subsequent calls. */
int mStackSize(MList *stack)
{
mException(INVALID_POINTER(stack),EXIT,"invalid stack");
MHandle *hdl=mHandle(stack,Stack);
struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
if(hdl->valid == 0) handle->order = -1;
hdl->valid = 1;
return (handle->order+1);
}
/* Per-list bookkeeping for a fixed-capacity circular queue layered over
 * an MList. read_order/write_order are the next slots to read/write.
 * 'flag' disambiguates the read_order == write_order case:
 *   1 = full, -1 = empty, 0 = partially filled. */
struct HandleQueue
{
volatile int read_order;
volatile int write_order;
volatile int flag;
};
/* Handle destructor: the queue state owns no extra memory. */
void endQueue(void *info) {}
/* Type hash used by mHandle() to locate the queue handle on the list. */
#define HASH_Queue 0xd98b43dc
/* Number of elements currently held in the circular queue.
 * flag > 0 means full (capacity queue->num), flag < 0 means empty;
 * otherwise the count is the cursor distance, wrapped around the
 * capacity when the write cursor has lapped behind the read cursor. */
int mQueueSize(MList *queue)
{
    mException(INVALID_POINTER(queue),EXIT,"invalid queue");
    MHandle *hdl=mHandle(queue,Queue);
    struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);

    int count;
    if(handle->flag>0)      count = queue->num;   /* full  */
    else if(handle->flag<0) count = 0;            /* empty */
    else
    {
        int delta = handle->write_order - handle->read_order;
        count = (delta>0) ? delta : (queue->num + delta);
    }
    return count;
}
/* Append 'data' ('size' bytes) at the queue's write cursor. Returns the
 * stored element pointer, or NULL when the queue is full (flag > 0).
 * size<=0 presumably stores a NUL-terminated string, matching
 * mQueueRead -- TODO confirm against mListWrite. First use resets both
 * cursors to 0.
 * NOTE(review): the cursor wrap (mAtomicCompare resetting write_order
 * at queue->num) happens after mListWrite, and the flag update is not
 * atomic with the cursor update -- assumes writers are serialized;
 * confirm intended thread-safety. */
void *mQueueWrite(MList *queue,void *data,int size)
{
mException(INVALID_POINTER(queue),EXIT,"invalid queue");
mException(queue->num<=0,EXIT,"invalid queue");
MHandle *hdl=mHandle(queue,Queue);
struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
if(hdl->valid == 0) {handle->read_order=0;handle->write_order=0;}
hdl->valid = 1;
if(handle->flag>0) return NULL;
/* assumes mAtomicAdd yields the pre-increment slot -- TODO confirm */
int order=mAtomicAdd(&(handle->write_order),1);
if(order>=queue->num) order=order-queue->num;
void *p = mListWrite(queue,order,data,size);
/* wrap the write cursor once it reaches capacity */
mAtomicCompare(&(handle->write_order),queue->num,0);
handle->flag =(handle->write_order == handle->read_order)?1:0;
return p;
}
/* Pop the element at the queue's read cursor. Returns the element
 * pointer, or NULL when the queue was never written or is empty
 * (flag < 0). When 'data' is non-NULL the element is also copied out:
 * memcpy of 'size' bytes, or strcpy when size<=0 (element assumed to
 * be a NUL-terminated string).
 * NOTE(review): the slot pointer is fetched before the cursor wrap,
 * and the flag update is not atomic with the cursor update -- assumes
 * a single reader; confirm intended thread-safety. */
void *mQueueRead(MList *queue,void *data,int size)
{
mException(INVALID_POINTER(queue),EXIT,"invalid queue");
mException(queue->num<=0,EXIT,"invalid queue");
MHandle *hdl=mHandle(queue,Queue);
struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
if(hdl->valid == 0) return NULL;
if(handle->flag<0) return NULL;
/* assumes mAtomicAdd yields the pre-increment slot -- TODO confirm */
int order = mAtomicAdd(&(handle->read_order),1);
void *p = queue->data[order];
/* wrap the read cursor once it reaches capacity */
mAtomicCompare(&(handle->read_order),queue->num,0);
handle->flag =(handle->write_order == handle->read_order)?-1:0;
if(data!=NULL)
{
if(size<=0) strcpy((char *)data,(char *)p);
else memcpy(data,p,size);
}
return p;
}
// struct HashElement
// {
// int hash;
// void *data;
// };
// struct HandleHashList
// {
// int num;
// };
// void mHashList(MList *list,void *data,int size)
// {
// if(list->size <
/*
struct HandleBuffer
{
int proc_num;
int *order;
unsigned char *state;
};
void endBuffer(void *info)
{
struct HandleBuffer *handle = info;
if(handle->state != NULL) mFree(handle->state);
}
#define HASH_Buffer 0xcb4df739
int BufferRead(MList *buffer,int ID,struct HandleBuffer *handle)
{
int proc_num = handle->proc_num;
int order = handle->order[ID];
if(((ID >0)&&(handle->order[ID-1]==order))||((ID==0)&&(handle->order[proc_num-1]==order)))
return DFLT;
int state = handle->state[order];
if((state&1 == 1)||(order<0))
{
order = order + 1;
if(order == buffer->num)
{
if(handle->order[handle->proc_num-1]<0) return DFLT;
order = 0;
}
handle->state[handle->order[ID]] = 0;
handle->order[ID] = order;
return BufferRead(buffer,ID,handle);
}
return order;
}
void *mBufferSet(MList *buffer,int volume,int proc_num)
{
mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
if(volume>0)
{
if(buffer->num>volume) buff->num = volume;
else mListAppend(buff,volume);
}
mException(buffer->num<=1,EXIT,"invalid buffer");
mException((proc_num<=0),EXIT,"invalid proc_num");
MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
struct HandleBuffer *handle = hdl->handle;
if(hdl->valid == 0)
{
handle->order = mMalloc(proc_num*sizeof(int));
memset(handle->order,-1,proc_num*sizeof(int));
handle->proc_num = proc_num;
handle->state = mMalloc(buffer->num*sizeof(unsigned char));
memset(handle->state,0,buffer->num*sizeof(unsigned char));
}
hdl->valid = 1;
}
void *mBufferWrite(MList *buffer,int ID,void *data,int size)
{
mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
struct HandleBuffer *handle = hdl->handle;
mException((hdl->valid == 0),EXIT,"invalid buffer");
int proc_num = handle->proc_num;
mException((ID>=proc_num)||(ID=<0),EXIT,"invalid ID");
int order = handle->order[ID];
if((handle->state[order]&2!=0)||(order<0))
{
order = order+1;
if(order==buffer->num) order=0;
if((ID==0)&&(state[order]!=0)) return NULL;
if((ID >0)&&(state[order]!=4)) return NULL;
handle->state[handle->order] = 4;
handle->order[ID] = order;
}
void *p = mListWrite(buffer,order,data,size);
handle->state[order] = (handle->state[order])|2;
return p;
}
void mBufferRead(MList *buffer,int ID,void *data,int size)
{
mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
struct HandleBuffer *handle = hdl->handle;
mException((hdl->valid == 0),EXIT,"invalid buffer");
int proc_num = handle->proc_num;
mException((ID>=proc_num)||(ID=<0),EXIT,"invalid ID");
int order = handle->order[ID];
if((handle->state[order]&1!=0)||(order<0))
{
order = order+1;
if(order==buffer->num)
{
if(handle->order[proc_num-1]< 0) return NULL;
order=0;
}
if(ID>0)
if(handle->order[ID -1]==order) return NULL;
else if(proc_num>1)
if(handle->order[proc_num-1]==order) return NULL;
handle->state[handle->order] = 0;
handle->order = order;
}
void *p = mListRead(buffer,order,data,size);
*/
|
GB_binop__islt_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint32)
// A*D function (colscale): GB (_AxD__islt_uint32)
// D*A function (rowscale): GB (_DxB__islt_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint32)
// C=scalar+B GB (_bind1st__islt_uint32)
// C=scalar+B' GB (_bind1st_tran__islt_uint32)
// C=A+scalar GB (_bind2nd__islt_uint32)
// C=A'+scalar GB (_bind2nd_tran__islt_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__islt_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp.h | /* ----------------------------------------------------------------------
miniMD is a simple, parallel molecular dynamics (MD) code. miniMD is
an MD microapplication in the Mantevo project at Sandia National
Laboratories ( http://www.mantevo.org ). The primary
authors of miniMD are Steve Plimpton (sjplimp@sandia.gov) , Paul Crozier
(pscrozi@sandia.gov) and Christian Trott (crtrott@sandia.gov).
Copyright (2008) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This library is free software; you
can redistribute it and/or modify it under the terms of the GNU Lesser
General Public License as published by the Free Software Foundation;
either version 3 of the License, or (at your option) any later
version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA. See also: http://www.gnu.org/licenses/lgpl.txt .
For questions, contact Paul S. Crozier (pscrozi@sandia.gov) or
Christian Trott (crtrott@sandia.gov).
Please read the accompanying README and LICENSE files.
---------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
OpenMP wrapper for headerfile
-defines fake OpenMP functions for compilation without OpenMP support
-"#pragma omp" statements will be ignored by compiler automatically but
could be ifdef'd in order to get rid of Compiler warnings
---------------------------------------------------------------------- */
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallback: without OpenMP there is exactly one thread, id 0.
 * NOTE(review): plain 'inline' at file scope emits no external
 * definition under C99 inline semantics; fine for C++ or always-inlined
 * builds -- consider 'static inline' if this header is used from C. */
inline int omp_get_thread_num()
{
return 0;
}
/* Serial fallback: the maximum available thread count is 1. */
inline int omp_get_max_threads()
{
return 1;
}
/* Serial fallback: thread-count requests are accepted and ignored
 * (num_threads is deliberately unused).
 * NOTE(review): the OpenMP runtime routine omp_set_num_threads returns
 * void; this stub returns int -- signature left unchanged so existing
 * callers of this header keep compiling. */
inline int omp_set_num_threads(int num_threads)
{
return 1;
}
/* Serial fallback for the GCC atomic fetch-and-add builtin: with a
 * single thread a plain read-modify-write suffices. Returns the value
 * *ptr held before the addition, matching the builtin's contract.
 * NOTE(review): this defines a function with a reserved __-prefixed
 * builtin name; it is only reached in the no-OpenMP branch of this
 * header and relies on the compiler accepting the redefinition --
 * confirm on each supported toolchain. */
inline int __sync_fetch_and_add(int* ptr, int value)
{
int tmp = *ptr;
ptr[0] += value;
return tmp;
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is used as scratch space and is modified by the carry
 * normalization below (classic glibc-manual idiom); callers must not
 * rely on y afterwards. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += carry;
    y->tv_usec -= carry * 1000000;
  }
  /* Push excess microseconds (beyond one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += carry * 1000000;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall iff x's (adjusted) seconds precede y's. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2035,2048)),ceild(8*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(8*t1+Nx+7,2048)),floord(16*t2+Nx+3,2048)),floord(8*t3+Nx-5,2048)),floord(16*t1-16*t2+Nz+Nx+5,2048));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),512*t4+510);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(2048*t4,4*t5+4);
ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract *y from *x into *result; returns 1 if the difference is
 * negative, 0 otherwise.
 * NOTE(review): *y is modified by the carry normalization below --
 * callers must not reuse it afterwards. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
/* Push excess microseconds (beyond one second) into y's seconds. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
 * tv_usec is certainly positive.
 */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
1875.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern (i + j) / nj so runs are
   reproducible without a PRNG. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Dumps every element of B to stderr; a newline is emitted every 20
   values (indexed on the compile-time NJ) to keep lines short. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0)
        fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/*
  3x3 2-D convolution of A into B over interior points only; the borders
  of B are never written.  Kept byte-identical: the left-to-right summation
  order determines the exact floating-point result.
*/
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* NOTE(review): the bare "#pragma omp" lines look like placeholders for
     "parallel for" directives -- confirm intent before enabling OpenMP. */
  #pragma omp
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    #pragma omp
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      /* Fixed 3x3 kernel; coefficients sum the 8-neighborhood plus center. */
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
        + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
        + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Benchmark driver: initialize A, time kernel_conv2d, and print B to keep
   it live (polybench_prevent_dce). */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument.
     NOTE(review): kernel_conv2d never writes B's border rows/columns, so
     print_array emits uninitialized values there -- confirm acceptable. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declaractions.
*/
/*
  Per-channel compression codes as stored in the PSD file.
*/
typedef enum
{
  Raw = 0,                  /* uncompressed scanlines */
  RLE = 1,                  /* PackBits run-length encoding */
  ZipWithoutPrediction = 2, /* zlib deflate */
  ZipWithPrediction = 3     /* zlib deflate with delta prediction */
} PSDCompressionType;
/*
  Color-mode field of the PSD header (see ModeToString for display names).
*/
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declaractions.
*/
/*
  Per-channel record from a layer's channel table.
*/
typedef struct _ChannelInfo
{
  short
    type;  /* channel id; negative ids select alpha/mask (see SetPSDPixel) */

  size_t
    size;  /* channel data length in bytes */
} ChannelInfo;
/*
  Layer-mask data: the mask raster, its placement, and its flags.
*/
typedef struct _MaskInfo
{
  Image
    *image;  /* decoded mask raster (may be NULL) */

  RectangleInfo
    page;    /* mask position relative to the canvas */

  unsigned char
    background,  /* fill value outside the mask raster */
    flags;
} MaskInfo;
/*
  Everything parsed from one PSD layer record.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* one entry per channel */

  char
    blendkey[4];  /* 4-char blend-mode key, e.g. "norm" */

  Image
    *image;  /* decoded layer raster */

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;  /* layer bounds on the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],  /* Pascal-style layer name (hence the fixed 257 bytes) */
    visible;

  unsigned short
    channels;

  StringInfo
    *info;  /* additional layer information blocks */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/* Return MagickTrue when the magick bytes carry the 4-byte PSD
   signature "8BPS". */
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map the image's composite operator to the 4-character PSD blend-mode key.
  PSD keys are stored big-endian; for little-endian images the byte-reversed
  spelling is returned instead.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian != LSBEndian ? "idiv" : "vidi");
    case ColorDodgeCompositeOp:
      return(image->endian != LSBEndian ? "div " : " vid");
    case ColorizeCompositeOp:
      return(image->endian != LSBEndian ? "colr" : "rloc");
    case DarkenCompositeOp:
      return(image->endian != LSBEndian ? "dark" : "krad");
    case DifferenceCompositeOp:
      return(image->endian != LSBEndian ? "diff" : "ffid");
    case DissolveCompositeOp:
      return(image->endian != LSBEndian ? "diss" : "ssid");
    case ExclusionCompositeOp:
      return(image->endian != LSBEndian ? "smud" : "dums");
    case HardLightCompositeOp:
      return(image->endian != LSBEndian ? "hLit" : "tiLh");
    case HardMixCompositeOp:
      return(image->endian != LSBEndian ? "hMix" : "xiMh");
    case HueCompositeOp:
      return(image->endian != LSBEndian ? "hue " : " euh");
    case LightenCompositeOp:
      return(image->endian != LSBEndian ? "lite" : "etil");
    case LinearBurnCompositeOp:
      return(image->endian != LSBEndian ? "lbrn" : "nrbl");
    case LinearDodgeCompositeOp:
      return(image->endian != LSBEndian ? "lddg" : "gddl");
    case LinearLightCompositeOp:
      return(image->endian != LSBEndian ? "lLit" : "tiLl");
    case LuminizeCompositeOp:
      return(image->endian != LSBEndian ? "lum " : " mul");
    case MultiplyCompositeOp:
      return(image->endian != LSBEndian ? "mul " : " lum");
    case OverlayCompositeOp:
      return(image->endian != LSBEndian ? "over" : "revo");
    case PinLightCompositeOp:
      return(image->endian != LSBEndian ? "pLit" : "tiLp");
    case SaturateCompositeOp:
      return(image->endian != LSBEndian ? "sat " : " tas");
    case ScreenCompositeOp:
      return(image->endian != LSBEndian ? "scrn" : "nrcs");
    case SoftLightCompositeOp:
      return(image->endian != LSBEndian ? "sLit" : "tiLs");
    case VividLightCompositeOp:
      return(image->endian != LSBEndian ? "vLit" : "tiLv");
    case OverCompositeOp:
    default:
      /* Anything unrecognized falls back to "normal". */
      return(image->endian != LSBEndian ? "norm" : "mron");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Photoshop blends semi-transparent pixels with white when flattening; this
  reverts that blend so channel values match the unblended layer data.
  Disabled by setting the image option 'psd:alpha-unblend' to false.
  Returns MagickFalse when any pixel row could not be read or synced.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* Only applies to sRGB images that actually blend their alpha channel. */
  if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A prior row failure makes remaining iterations no-ops (cannot break
       out of an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* Fully transparent or fully opaque pixels need no correction. */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            /* Invert "c' = c*gamma + (1-gamma)*white" for each color channel. */
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/* Map a PSD on-disk compression code to MagickCore's CompressionType. */
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Fold a layer's global opacity into its alpha channel (revert==MagickFalse),
  or divide it back out (revert==MagickTrue, used before writing).
  Returns MagickFalse when any pixel row could not be read or synced.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* Fully opaque layers need no work. */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* Ensure there is an alpha channel to scale. */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* alpha' = alpha*opacity/QuantumRange, or the inverse when reverting. */
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Multiply the image's alpha channel by a layer mask (revert==MagickFalse),
  or divide the mask back out (revert==MagickTrue).  The mask is first
  composited onto a canvas-sized clone filled with `background` so that
  areas outside the mask raster get the mask's background value.
  Returns MagickFalse on any pixel-cache failure.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  /* Place the mask raster at its page offset relative to the layer. */
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      /* alpha' = alpha * mask-intensity / QuantumRange, or the inverse. */
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash a layer's opacity-mask image in the image registry under a random
  key so it can be re-attached when the PSD is written back; the key (with
  the mask background appended) is recorded as the layer's
  "psd:opacity-mask" artifact.

  Fix: the key buffer was allocated with GetRandomKey(random_info,2+1)
  (3 bytes) but key[8] and key[9] were written -- an out-of-bounds write.
  Allocate 10 bytes so indices 0..9 are valid.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /* 8 random bytes + background byte + NUL terminator = 10 bytes.
     NOTE(review): the key is used as a C string below; confirm GetRandomKey
     cannot yield an embedded NUL for this registry use. */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char ) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode PackBits (RLE) compressed channel data into `pixels`.
  A control byte > 128 means "repeat the next byte 257-control times";
  < 128 means "copy the next control+1 bytes"; == 128 is a no-op.
  Sub-byte depths (1/2/4) are unpacked to one output byte per sample;
  1-bit samples are additionally inverted (0 -> 255).
  Returns the number of output bytes produced (may be short if either
  buffer is exhausted).
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Bail out (returning bytes written so far) when input is exhausted. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* Bail out when the next write would overrun the output buffer. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;  /* remaining input bytes */

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    /* 128 is the PackBits no-op control byte. */
    if (length == 128)
      continue;
    if (length > 128)
      {
        /* Run: replicate the following byte 257-length times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* Expand 8 one-bit samples, inverted (set bit -> 0). */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              /* Four 2-bit samples per byte. */
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              /* Two 4-bit samples per byte. */
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal: copy the next length+1 bytes, unpacking per depth. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
/* Release every per-layer resource (layer image, mask image, extra info)
   and then the layer array itself.  Always returns NULL. */
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    i;

  for (i=0; i < number_layers; i++)
  {
    LayerInfo
      *layer = layer_info+i;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/* Bytes per sample: PseudoClass images with more than 256 colors need
   16-bit colormap indexes; otherwise the packet size follows the depth. */
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
/* Read a length field from the blob: 32-bit for version-1 PSD files,
   64-bit for PSB (version 2). */
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
/* Bytes per decoded scanline: 1-bit images pack 8 pixels per byte,
   everything else stores one packet per pixel. */
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    packet_size;

  packet_size=GetPSDPacketSize(image);
  if (image->depth == 1)
    return(((image->columns+7)/8)*packet_size);
  return(image->columns*packet_size);
}
/* Human-readable name for a PSD color-mode code (for logging). */
static const char *ModeToString(PSDImageType type)
{
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
/* Negate every channel except alpha, restoring the channel mask after. */
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
/*
  Walk the 8BIM image-resource blocks: capture the whole section as an
  "8bim" profile, extract resolution (id 0x03ed) into image properties,
  and note whether a merged image is present (id 0x0421).
  Returns the profile, or NULL when the section is too short or the
  profile cannot be allocated.

  Fix: the BlobToStringInfo() result was used without a NULL check, which
  would crash on allocation failure.
*/
static StringInfo *ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    /* Each resource block starts with the "8BIM" signature. */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal-string name, padded to an even byte count. */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Reject blocks whose payload would run outside the section. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",
          image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 == 0 means no merged (flattened) image. */
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Payloads are padded to even length. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  Reverse `length` bytes of p in place for little-endian images (PSD keys
  are stored big-endian); a no-op for MSB-endian images.

  Fix: replaced the comma-chained XOR swap with a plain temporary swap --
  clearer, and free of the XOR idiom's self-swap hazard.
*/
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for (--q; p < q; ++p, --q)
  {
    char
      swap;

    swap=(*p);
    *p=(*q);
    *q=swap;
  }
}
/*
  Store one decoded sample into the pixel at q.  `type` is the PSD channel
  id: 0..3 select color channels in order, negative ids select alpha or
  mask planes (-1 alpha, -2..-4 map back onto color channels), and the
  meaning of 3/4 depends on colorspace and channel count.  For PseudoClass
  images the sample is a colormap index (or, for extra channels, the
  colormap entry's alpha).
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      /* Channel 0 carries the palette index; scale to the index range. */
      if (type == 0)
        {
          if (packet_size == 1)
            SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
          else
            SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(image,q),exception);
      /* With more channels to come, defer resolving the colormap entry. */
      if ((type == 0) && (channels > 1))
        return;
      else
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha if present. */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* Fifth channel is only meaningful beyond sRGB's 3 color channels. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
/*
  Convert one decoded scanline (`pixels`) into the image's pixel cache at
  `row`, for the channel identified by `type`.  Sample width follows the
  packet size (8-bit, 16-bit big-endian, or 32-bit big-endian float);
  1-bit images unpack 8 inverted samples per input byte.
  Returns the SyncAuthenticPixels() status.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else if (packet_size == 2)
      {
        unsigned short
          nibble;

        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    else
      {
        /* 32-bit float samples, nominally in [0,1]. */
        MagickFloatType
          nibble;

        p=PushFloatPixel(MSBEndian,p,&nibble);
        pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /* 1-bit image: expand up to 8 packed samples from this byte,
           stopping at the row edge; set bits map to 0 (black inverted). */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate for the outer loop's x++ unless the row is done. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed (RAW) channel from the blob, one scanline at a
  time, and push each row into the pixel cache.
  Returns MagickFalse on a short read or a pixel-cache failure.

  Fix: removed the dead `status=MagickFalse;` at the top of the loop --
  it was unconditionally overwritten before ever being observed.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
/* Read the per-row compressed-length table that precedes RLE channel
   data: 16-bit entries for version-1 PSD, 32-bit for PSB.  Returns NULL
   when the table cannot be allocated. */
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return sizes;
  for (y=0; y < (ssize_t) size; y++)
  {
    if (psd_info->version == 1)
      sizes[y]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[y]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel: for each row, read sizes[y]
  compressed bytes, decode them into a full scanline, and push it into
  the pixel cache.  The compressed buffer is sized to the largest row and
  capped at row_size+2048 as a sanity limit against corrupt length tables.
  Returns MagickFalse on short reads, decode shortfalls, or cache errors.

  Fix: the compact buffer was allocated with sizeof(*pixels) instead of
  sizeof(*compact_pixels) -- same size today, but sized off the wrong
  object (latent bug if the types ever diverge).
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compressed buffer to the largest row in the table. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth==1 passes a sentinel so DecodePSDPixels takes the byte path. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
length,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
if ((MagickSizeType) compact_size > GetBlobSize(image))
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
if (ret == Z_STREAM_END)
break;
}
(void) inflateEnd(&stream);
}
if (compression == ZipWithPrediction)
{
p=pixels;
while (count > 0)
{
length=image->columns;
while (--length)
{
if (packet_size == 2)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
}
/*
else if (packet_size == 4)
{
TODO: Figure out what to do there.
}
*/
else
*(p+1)+=*p;
p+=packet_size;
}
p+=packet_size;
count-=row_size;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#endif
/*
  Read one channel of a layer, dispatching on the compression type.
  Channel types < -1 are layer masks: unsupported masks are skipped,
  supported ones are decoded into a separate grayscale image that is
  stored in layer_info->mask.image.  On return the blob is positioned
  just past this channel's data regardless of decode outcome.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* size includes the 2-byte compression short the caller read */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) SetImageType(mask,GrayscaleType,exception);
          /* decode the mask channel into the mask image, not the layer */
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* always reposition past the channel, even after a failed decode */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Populate one layer image: set blend/compose metadata and hidden psd:*
  artifacts, read every channel (each preceded by its own 2-byte
  compression type), then apply layer opacity, CMYK negation and the
  opacity mask.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    /* TODO: Remove this when we figure out how to support this */
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* channel type -1 is the layer's alpha channel */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Verify a layer declares every channel the color mode requires.  A bitmask
  of required channels is built from min_channels and cleared as matching
  channel records are found; an alpha channel is tolerated when the layer
  carries one extra channel beyond the minimum.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  register ssize_t
    j;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    short
      channel;

    channel=layer_info->channel_info[j].type;
    if (channel < -1)
      continue;  /* layer/user masks do not satisfy color channels */
    switch (channel)
    {
      case -1: required|=AlphaChannel; break;
      case 0: required&=~RedChannel; break;
      case 1: required&=~GreenChannel; break;
      case 2: required&=~BlueChannel; break;
      case 3: required&=~BlackChannel; break;
      default: break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Splice the decoded layer images onto `image` as a doubly-linked list and
  release the layer_info array.  Entries whose image failed to load are
  compacted out first; if nothing survives, only the array is freed.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    j;

  /*
    Compact the list: shift later entries over any NULL image slot.
  */
  i=0;
  while (i < number_layers)
  {
    if (layer_info[i].image != (Image *) NULL)
      {
        i++;
        continue;
      }
    for (j=i; j < (number_layers-1); j++)
      layer_info[j]=layer_info[j+1];
    number_layers--;
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /*
    Wire previous/next pointers and copy each layer's page geometry.
  */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Return MagickTrue when scene `index` lies outside the user-requested
  scene range; with no range requested every scene is kept.
*/
static inline MagickBooleanType PSDSkipImage(const ImageInfo *image_info,
  const size_t index)
{
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= (image_info->scene+image_info->number_scenes-1)))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Parse the PSD "layer and mask information" section: the layer count, one
  record per layer (geometry, channel table, blend key, opacity, mask,
  blending ranges, name, additional info), then the per-layer channel pixel
  data.  With skip_layers set, only the layer count is consumed (to learn
  whether the merged image has alpha).  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* 16/32-bit images store the layer section under Lr16/Lr32 */
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    First pass: read every layer record (no pixel data yet).
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      x,
      y;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* bounds are stored top, left, bottom, right */
    layer_info[i].page.y=(ssize_t) ReadBlobSignedLong(image);
    layer_info[i].page.x=(ssize_t) ReadBlobSignedLong(image);
    y=(ssize_t) ReadBlobSignedLong(image);
    x=(ssize_t) ReadBlobSignedLong(image);
    layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
    layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /*
      Channel table: (type, byte size) per channel.
    */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* flag bit 0x02 marks the layer hidden in Photoshop */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        /* combined_length tracks bytes consumed within `size` */
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* unless positioned relative to the layer, rebase the mask */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): the 18 bytes consumed above are subtracted
              unchecked — a crafted length < 18 would wrap to a huge
              unsigned value here; confirm an upstream guard exists.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* Pascal name is padded to a multiple of 4 bytes */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Whatever remains of `size` is the additional-info block; keep it
          verbatim so it can be attached as the psd:additional-info profile.
        */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
          }
      }
  }
  /*
    Second pass: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      /* ping mode: attach empty layer images, skip all pixel data */
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Third pass: read (or skip) each layer's channel pixel data.
  */
  status=MagickTrue;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(image_info,i) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  Public entry point for reading PSD layers: honor the coder read policy,
  then delegate to ReadPSDLayersInternal without skipping layers.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the flattened (merged) composite stored at the end of the PSD file.
  Only Raw and RLE compression are supported here; each channel is decoded
  in sequence, with channel 1 of a 2-channel image treated as alpha.
  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /* the merged image is not needed when a non-zero scene was requested */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* all row counts for every channel precede the compressed data */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* in a gray+alpha image the second channel is alpha (type -1) */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  Read a PSD/PSB file: validate the header, set up colorspace/colormap,
  parse the image-resource blocks, read the layer and mask section, then
  the precombined (merged) composite, falling back to flattening the
  layers when no merged image is present.  Returns the image list, or
  NULL on failure (exception holds the reason).
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* version 1 is PSD, version 2 is PSB (large document) */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* version-1 files are limited to 30000x30000 pixels */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Derive colorspace and the minimum channel count from the PSD mode.
  */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
           (psd_info.mode == DuotoneMode))
    {
      if (psd_info.depth != 32)
        {
          status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ?
            256 : 65536),exception);
          if (status == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Image colormap allocated");
        }
      psd_info.min_channels=1;
      (void) SetImageColorspace(image,GRAYColorspace,exception);
    }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* planar layout: all reds, then all greens, then all blues */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image,exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* 8-byte PSB length: the high 32 bits are read and discarded */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((has_merged_image != MagickFalse) || (imageListLength == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /*
    No usable merged image and no layers read yet: rewind and read the
    layer section after all.
  */
  if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* synthesize the composite by flattening the layers */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* attach the resource-block profile to every kept scene */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
/*
  Register both the large-document (PSB) and classic (PSD) format entries
  with identical decode/encode handlers; both require seekable streams.
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=(CoderDecoderSeekableStreamFlag |
    CoderEncoderSeekableStreamFlag);
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=(CoderDecoderSeekableStreamFlag |
    CoderEncoderSeekableStreamFlag);
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
/*
  Remove the registrations added by RegisterPSDImage(): first the
  large-document (PSB) entry, then the classic (PSD) entry.
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Version 1 (PSD) stores per-row byte counts as 16-bit big-endian values;
    version 2 (PSB) stores them as 32-bit big-endian values.
  */
  return(psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Patch a previously reserved offset slot at `offset', then restore the
    original write position so streaming output can continue.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Section lengths are 32-bit in PSD (version 1) and 64-bit in PSB.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Seek back to a reserved size field, write the now-known size, and
    return to the end of the blob.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses `length' bytes of raw pixel data from
  `pixels' into `compact_pixels' using the Packbits run-length scheme used
  by PSD/TIFF: a header byte 0..127 means "copy the next n+1 literal bytes",
  a header byte 129..255 means "repeat the next byte 257-n times".  Returns
  the number of bytes written to `compact_pixels' (including the trailing
  end-of-data marker).  On allocation failure it throws and returns via
  ThrowBinaryException.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: header byte + up to 127 literals. */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the remaining input bytes; the switch special-cases short tails
     so the default case can always safely look ahead two bytes. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: emit as a one-byte literal run. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: a two-byte literal run is never worse. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three equal bytes: header (256-3)+1 = 254 encodes repeat 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)  /* 127 is the longest encodable run */
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Extend until three equal bytes begin a packed run; packbits[0] is
           reserved for the header byte written below. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() writes the 2-byte compression marker that starts a
  PSD channel-data section.  For RLE it additionally reserves one byte-count
  slot per scanline per channel; these slots are patched later via
  WritePSDOffset() once the compressed row lengths are known.  Returns the
  number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      /* Placeholder row-length table: zeros now, real sizes patched later. */
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (compression == ZipCompression)
    length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}
/*
  WritePSDChannel() writes one channel (one QuantumType) of `next_image' to
  the output blob of `image', applying the requested compression (raw, RLE
  via PSDPackbitsEncodeImage, or Zip when zlib is available).  When
  `separate' is set the channel is written with its own compression marker
  (layer-channel layout); otherwise it is part of a merged composite
  section whose marker was already written.  `size_offset' locates the RLE
  row-length table to patch.  Returns the number of bytes written, or 0 on
  failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Channel gets its own compression header; the RLE table starts
         2 bytes after the current position (past the marker). */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD supports only 8- or 16-bit samples; clamp deeper images to 16. */
  if (next_image->depth > 8)
    next_image->depth=16;
  /* NOTE(review): monochrome tests `image', not `next_image' — presumably
     intentional (base image drives bitmap mode), but worth confirming. */
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Map quality 1..9 directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* PSD bitmap mode stores ink-on as 0, so invert 1-bit data. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this row's byte count into the reserved RLE table slot. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Finish the deflate stream on the last scanline. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) CHUNK;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) CHUNK-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_sample;

  unsigned char
    *buffer;

  /*
    Allocate a worst-case Packbits output buffer for one scanline; samples
    wider than 8 bits occupy two bytes each.  Returns NULL (with an
    exception raised) when the allocation fails.
  */
  bytes_per_sample=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_sample*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  WritePSDChannels() writes every channel of `next_image' (index, gray, or
  R/G/B[/K], plus an optional alpha channel and opacity mask) by delegating
  to WritePSDChannel().  When `separate' is set (layer data) each channel's
  size field is patched back at `size_offset'; otherwise (merged composite)
  a shared RLE row-length table is advanced via `rows_offset'.  Returns the
  total number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* image_info compression, when set, overrides the per-image setting. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged composite: one compression marker covers all channels. */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Size of one channel's RLE row table: 2 or 4 bytes per row. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before and re-negate after. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* An opacity mask registered by the reader is written as an extra
         channel pulled from the image registry. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  WritePascalString() writes `value' as a Pascal string: a length byte
  followed by at most 255 characters, padded with zero bytes so the total
  field size (including the length byte) is a multiple of `padding'.
  Returns the number of bytes written.

  Fix: the original evaluated strlen(value) twice in one expression; the
  length is now computed once and clamped.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    i;

  size_t
    count,
    length;

  /*
    Max length is 255.
  */
  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  WriteResolutionResourceBlock() writes the 8BIM resolution image resource
  (id 0x03ED, 16 bytes): horizontal and vertical resolutions as 16.16
  fixed-point values plus their display units (1 = pixels/inch,
  2 = pixels/cm).  Centimeter resolutions are converted to inches first.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 was already added above, so this rounds twice —
     presumably harmless for real-world resolutions, but worth confirming. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  /*
    Emit one layer channel record: the channel identifier followed by a
    zero size placeholder that is patched once the channel data is written.
  */
  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  return(count+SetPSDSize(psd_info,image,0));
}
/*
  RemoveICCProfileFromResourceBlock() deletes the ICC profile resource
  (id 0x040F) from an 8BIM resource block in place, shifting any following
  resources down and shrinking the StringInfo, so the profile can be
  written separately without duplication.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* A resource header alone needs 12 bytes; require headroom for one. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    /* Stop at the first record that is not a valid 8BIM resource. */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: 4-byte signature (skipped), 2-byte id, Pascal name (assumed
       empty here: 2 bytes), 4-byte data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Total record size: padded data plus the 12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  RemoveResolutionFromResourceBlock() deletes the resolution resource
  (id 0x03ED) from an 8BIM resource block in place, because the writer
  emits a fresh resolution block via WriteResolutionResourceBlock().
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* Pad the data length to an even byte count; bail on overflow. */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining resources over the removed record. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  GetAdditionalInformation() returns the "psd:additional-info" profile to
  embed in the layer records, filtered according to the image option of the
  same name: "all" keeps the profile untouched, "selective" (the default
  handled below) keeps only entries whose 4-character key is on the
  whitelist, and any other value drops the profile entirely.  The profile
  is filtered in place with memmove; returns NULL when nothing remains.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Any other option value: strip the profile from the image. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* Each entry: 4-byte signature, 4-byte key, 4-byte size, then data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* A size beyond the buffer means the profile is corrupt: abort. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: slide the rest of the buffer over this entry. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  WritePSDLayersInternal() writes the PSD "layer info" section: the layer
  count, one record per layer (bounds, channel table with size
  placeholders, blend mode, opacity, flags, optional mask record, name, and
  any whitelisted additional-info data), followed by every layer's channel
  data.  Channel sizes reserved in the records are patched back afterwards.
  When `layers_size' is non-NULL the unpadded section size is returned
  through it.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  /* Layers start at the second image in the list; a single image is its
     own (only) layer. */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* Reserve the section-size field; patched at the end. */
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the merged composite has alpha. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        /* NOTE(review): a 9-character registry key appears to flag a
           white-default mask — confirm against the reader's convention. */
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size placeholders begin. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* Channel id -1 is alpha, -2 is the user-supplied layer mask. */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unnamed layers get a synthetic "L<index>" label. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask record + additional
       info + the two fixed 4-byte sub-section lengths. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        /* 20-byte mask record: bounds, default color, flags, padding. */
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);  /* layer blending ranges: none */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The layer-info section size must be even; round up if odd. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the layer-info section: honor the coder
    write policy for "PSD" and silently skip layer output (reporting
    success) when writing is not authorized.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage() writes `image' in Adobe Photoshop format: the file
  header, color-mode data (palette for indexed images), image resources
  (resolution, 8BIM, ICC profile), the layer and mask section, and finally
  the merged composite image data.  Chooses PSB (version 2) for the "PSB"
  magick or dimensions above 30000 pixels.  Returns MagickTrue on success;
  errors are reported through `exception'.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* Bytes per pixel: 3 or 6 for RGB, +1/+2 when alpha is present. */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  psd_info.version=1;
  /* PSD is limited to 30000x30000; larger images require PSB (version 2). */
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* Anything not explicitly CMYK is written as sRGB or indexed. */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* no color-mode data */
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      /* 256 entries each for red, green, blue; unused slots are zero. */
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone: strip resources we re-emit ourselves. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile resource: id 0x040F, padded to an even length. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Layer and mask section: reserve the size field, write the layers,
         then patch the size back (header adds 8 or 12 bytes). */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      /* Zip is not valid for the merged composite; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
layerramdistancetransform.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2017-2021 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#pragma once
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/util/indexmapper.h>
#include <inviwo/core/datastructures/image/layer.h>
#include <inviwo/core/datastructures/image/layerram.h>
#include <inviwo/core/datastructures/image/layerramprecision.h>
#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif
namespace inviwo {
namespace util {
/**
* Implementation of Euclidean Distance Transform according to Saito's algorithm:
* T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
* of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
* pp. 1551-1565, 1994.
* http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
*
* Calculates the distance in base mat space
* * Predicate is a function of type (const T &value) -> bool to deside if a value in the input
* is a "feature".
* * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all
* squared distance values at the end of the calculation.
* * ProcessCallback is a function of type (double progress) -> void that is called with a value
* from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
typename ProgressCallback>
void layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
LayerRAMPrecision<U>* outDistanceField, const Matrix<2, U> basis,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename T, typename U>
void layerRAMDistanceTransform(const LayerRAMPrecision<T>* inVolume,
LayerRAMPrecision<U>* outDistanceField, const Matrix<2, U> basis,
const size2_t upsample);
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, Predicate predicate,
ValueTransform valueTransform, ProgressCallback callback);
template <typename U, typename ProgressCallback>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale, ProgressCallback callback);
template <typename U>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
const size2_t upsample, double threshold, bool normalize, bool flip,
bool square, double scale);
} // namespace util
// Squared two-pass Euclidean distance transform of a 2D layer following
// Saito & Toriwaki (1994): pass 1 computes, per row, the squared distance to
// the nearest feature along x; pass 2 combines columns into the full 2D
// squared distance; finally valueTransform is applied to every element
// (e.g. sqrt). Distances are measured in basis (world) space; `upsample`
// scales the output resolution per axis relative to the input.
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
                                     LayerRAMPrecision<U>* outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample,
                                     Predicate predicate, ValueTransform valueTransform,
                                     ProgressCallback callback) {
#ifdef IVW_USE_OPENMP
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif
    using int64 = glm::int64;
    auto square = [](auto a) { return a * a; };
    callback(0.0);
    const T* src = inLayer->getDataTyped();
    U* dst = outDistanceField->getDataTyped();
    const i64vec2 srcDim{inLayer->getDimensions()};
    const i64vec2 dstDim{outDistanceField->getDimensions()};
    const i64vec2 sm{upsample};
    // Squared length of one destination-pixel step along each axis, derived
    // from the basis: diag(B^T B) / dstDim^2.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]};
    const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}};
    const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize};
    {
        // The separable algorithm assumes an orthogonal basis; warn (but
        // continue) when any off-diagonal element of B^T B is significant.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;  // note: exits only the inner loop; outer continues harmlessly
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "layerRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal layer will not give "
                "correct values");
        }
    }
    // The destination must be the source upsampled by exactly `sm` per axis.
    if (srcDim * sm != dstDim) {
        throw Exception(
            "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
                " dst = " + toString(dstDim) + " scaling = " + toString(sm),
            IVW_CONTEXT_CUSTOM("layerRAMDistanceTransform"));
    }
    util::IndexMapper<2, int64> srcInd(srcDim);
    util::IndexMapper<2, int64> dstInd(dstDim);
    // A destination pixel is a "feature" iff its down-sampled source value
    // satisfies the predicate.
    auto is_feature = [&](const int64 x, const int64 y) {
        return predicate(src[srcInd(x / sm.x, y / sm.y)]);
    };
    // first pass, forward and backward scan along x
    // result: min distance in x direction
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 y = 0; y < dstDim.y; ++y) {
        // forward
        U dist = static_cast<U>(dstDim.x);
        for (int64 x = 0; x < dstDim.x; ++x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = squareVoxelSize.x * square(dist);
        }
        // backward
        dist = static_cast<U>(dstDim.x);
        for (int64 x = dstDim.x - 1; x >= 0; --x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist));
        }
    }
    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.45);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
    {
        std::vector<U> buff;
        buff.resize(dstDim.y);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
        for (int64 x = 0; x < dstDim.x; ++x) {
            // cache column data into temporary buffer
            for (int64 y = 0; y < dstDim.y; ++y) {
                buff[y] = dst[dstInd(x, y)];
            }
            for (int64 y = 0; y < dstDim.y; ++y) {
                auto d = buff[y];
                if (d != U(0)) {
                    // Only rows within ~sqrt(d) pixels can lower the minimum.
                    const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                    // NOTE(review): the backward reach is clamped to y - 1, so
                    // buff[0] is never considered for y >= 1 — looks like an
                    // off-by-one (std::min(rMax, y) would include it); confirm
                    // against the upstream implementation before changing.
                    const auto rStart = std::min(rMax, y - 1);
                    const auto rEnd = std::min(rMax, dstDim.y - y);
                    for (int64 n = -rStart; n < rEnd; ++n) {
                        const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                        if (w < d) d = w;
                    }
                }
                dst[dstInd(x, y)] = d;
            }
        }
    }
    // scale data
    callback(0.9);
    const int64 layerSize = dstDim.x * dstDim.y;
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 i = 0; i < layerSize; ++i) {
        dst[i] = valueTransform(dst[i]);
    }
    callback(1.0);
}
// Convenience overload with the default configuration: a pixel is a feature
// when its normalized value exceeds 0.5, the output is the (non-squared)
// Euclidean distance, and progress reports are discarded.
template <typename T, typename U>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
                                     LayerRAMPrecision<U>* outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample) {
    const auto isFeature = [](const T& value) {
        return util::glm_convert_normalized<double>(value) > 0.5;
    };
    const auto toDistance = [](const U& squaredDistance) {
        return static_cast<U>(std::sqrt(static_cast<double>(squaredDistance)));
    };
    const auto ignoreProgress = [](double) {};
    util::layerRAMDistanceTransform(inLayer, outDistanceField, basis, upsample, isFeature,
                                    toDistance, ignoreProgress);
}
// Dispatches over the layer's scalar representation type and forwards to
// layerRAMDistanceTransform with the caller-supplied predicate, value
// transform and progress callback, using the layer's own basis.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, Predicate predicate,
                                  ValueTransform valueTransform, ProgressCallback callback) {
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample,
                                  predicate, valueTransform, callback);
    });
}
// Flag-driven front end for the distance transform.  The three booleans select
// the configuration: `normalize` compares normalized values against the
// threshold, `flip` inverts the feature predicate (inside vs. outside), and
// `square` keeps squared distances instead of taking the square root.  `scale`
// multiplies every output value.
template <typename U, typename ProgressCallback>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale, ProgressCallback progress) {
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        using ValueType = util::PrecisionValueType<decltype(lrprecision)>;

        // Generic runner: the lambdas below have distinct closure types, so a
        // generic forwarding lambda replaces the original eight-way if-chain.
        const auto run = [&](auto featurePredicate, auto transform) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, featurePredicate, transform, progress);
        };

        const auto predicateIn = [threshold](const ValueType& val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType& val) { return val > threshold; };
        const auto normPredicateIn = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };
        const auto valTransIdent = [scale](const float& squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float& squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };

        // Predicate is chosen by (normalize, flip); transform by square.
        if (square) {
            if (normalize) {
                if (flip) {
                    run(normPredicateIn, valTransIdent);
                } else {
                    run(normPredicateOut, valTransIdent);
                }
            } else {
                if (flip) {
                    run(predicateIn, valTransIdent);
                } else {
                    run(predicateOut, valTransIdent);
                }
            }
        } else {
            if (normalize) {
                if (flip) {
                    run(normPredicateIn, valTransSqrt);
                } else {
                    run(normPredicateOut, valTransSqrt);
                }
            } else {
                if (flip) {
                    run(predicateIn, valTransSqrt);
                } else {
                    run(predicateOut, valTransSqrt);
                }
            }
        }
    });
}
// Same as the flag-driven overload above, but without progress reporting:
// forwards with a no-op progress callback.
template <typename U>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale) {
    const auto ignoreProgress = [](double) {};
    util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip,
                                 square, scale, ignoreProgress);
}
} // namespace inviwo
|
pr81052.c | /* PR middle-end/81052 */
/* { dg-do compile } */
/* { dg-options "-fopenmp-simd -O2" } */
/* The 'return' escapes the body of the '#pragma omp simd' loop, which is an
   invalid branch out of an OpenMP structured block; the dg-error annotation
   checks that the compiler diagnoses it.  */
int
foo (int x, int y)
{
  int i;
  #pragma omp simd
  for (i = x; i < y; ++i)
    return 0;	/* { dg-error "invalid branch to/from OpenMP structured block" } */
  return 1;
}
#ifdef __cplusplus
/* Template variant of the same check, compiled and instantiated only when the
   test is run as C++ (the dg-error is gated on target c++).  */
template <typename T>
T
bar (T x, T y)
{
  T i;
  #pragma omp simd
  for (i = x; i < y; ++i)
    return 0;	/* { dg-error "invalid branch to/from OpenMP structured block" "" { target c++ } } */
  return 1;
}

int x = bar (1, 7);
#endif
|
persistent.c | #include "persistent.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
/*
 * Iteratively relax the per-box DSVs until they converge.  Each iteration:
 *   1) computes every box's weighted average adjacent temperature (waat) from
 *      its top/bottom/left/right neighbours, weighted by contact overlap
 *      (a side with no neighbour contributes the box's own dsv weighted by
 *      that side's length), then divides by the perimeter;
 *   2) moves each dsv toward its waat by AFFECT_RATE and recomputes the
 *      global MIN_DSV / MAX_DSV via OpenMP min/max reductions.
 * The loop stops when (MAX_DSV - MIN_DSV) / MAX_DSV <= EPSILON.
 *
 * MIN_DSV, MAX_DSV, NUM_BOXES, NUM_THREADS, AFFECT_RATE and EPSILON are
 * globals (presumably declared in persistent.h — not visible here).
 * Returns the number of iterations performed.
 */
int compute_commit_dsv(Box* box_arr) {
    int count = 1;
    int thread_count;  /* shared; written by the master thread only */
    //omp_set_dynamic(0); // Explicitly disable dynamic teams
    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel shared(box_arr)
    {
        #pragma omp master
        thread_count=omp_get_num_threads();
        /* Every thread executes the do-while; the implicit barriers at the
           end of the omp-for loops keep the team in lockstep so all threads
           evaluate the exit condition on the same MIN_DSV/MAX_DSV values. */
        do {
            /* Phase 1: weighted average of neighbouring dsv values. */
            #pragma omp for
            for (int i = 0; i < NUM_BOXES; i++) {
                box_arr[i].waat = 0;
                // Get weighted average of top neighbours
                if (box_arr[i].num_top != 0) {
                    int j;
                    for (j = 0; j < box_arr[i].num_top; j++) {
                        int cur_topid = box_arr[i].top_ids[j];
                        int overlap = box_arr[i].top_ov[j];
                        box_arr[i].waat = box_arr[i].waat + box_arr[cur_topid].dsv * overlap;
                    }
                }
                else {
                    /* boundary box: its own edge contributes its dsv */
                    box_arr[i].waat = box_arr[i].waat + box_arr[i].width * box_arr[i].dsv;
                }
                // Get weighted average of bottom neighbours
                if (box_arr[i].num_bottom != 0) {
                    int j;
                    for (j = 0; j < box_arr[i].num_bottom; j++) {
                        int cur_bottomid = box_arr[i].bottom_ids[j];
                        int overlap = box_arr[i].bottom_ov[j];
                        box_arr[i].waat = box_arr[i].waat + box_arr[cur_bottomid].dsv * overlap;
                    }
                }
                else {
                    box_arr[i].waat = box_arr[i].waat + box_arr[i].width * box_arr[i].dsv;
                }
                // Get weighted average of left neighbours
                if (box_arr[i].num_left != 0) {
                    int j;
                    for (j = 0; j < box_arr[i].num_left; j++) {
                        int cur_leftid = box_arr[i].left_ids[j];
                        int overlap = box_arr[i].left_ov[j];
                        box_arr[i].waat = box_arr[i].waat + box_arr[cur_leftid].dsv * overlap;
                    }
                }
                else {
                    box_arr[i].waat = box_arr[i].waat + box_arr[i].height * box_arr[i].dsv;
                }
                // Get weighted average of right neighbours
                if (box_arr[i].num_right != 0) {
                    int j;
                    for (j = 0; j < box_arr[i].num_right; j++) {
                        int cur_rightid = box_arr[i].right_ids[j];
                        int overlap = box_arr[i].right_ov[j];
                        box_arr[i].waat = box_arr[i].waat + box_arr[cur_rightid].dsv * overlap;
                    }
                }
                else {
                    box_arr[i].waat = box_arr[i].waat + box_arr[i].height * box_arr[i].dsv;
                }
                // Find the weighted average by dividing with the perimeter
                box_arr[i].waat = box_arr[i].waat / box_arr[i].perimeter;
            }
            /* Reset the global extrema exactly once; the explicit barrier
               keeps the other threads out of the reduction loop until the
               reset is visible. */
            #pragma omp master
            {
                MIN_DSV = INT_MAX;
                MAX_DSV = INT_MIN;
                count++;
            }
            #pragma omp barrier
            /* Phase 2: commit the dsv updates and recompute the extrema. */
            #pragma omp for reduction(max:MAX_DSV) reduction(min:MIN_DSV)
            for (int i = 0; i < NUM_BOXES; i++) {
                if (box_arr[i].waat > box_arr[i].dsv) {
                    box_arr[i].dsv = box_arr[i].dsv + AFFECT_RATE * (box_arr[i].waat - box_arr[i].dsv);
                }
                else {
                    box_arr[i].dsv = box_arr[i].dsv - AFFECT_RATE * (box_arr[i].dsv - box_arr[i].waat);
                }
                if (box_arr[i].dsv < MIN_DSV) MIN_DSV = box_arr[i].dsv;
                if (box_arr[i].dsv > MAX_DSV) MAX_DSV = box_arr[i].dsv;
            }
            //if (count == 2) break;
            /* Guard against division by zero in the loop condition. */
            if (MAX_DSV == 0) break;
        } while (((MAX_DSV - MIN_DSV) / MAX_DSV) > EPSILON);
        count--;
    }
    printf("A total of %d threads were created.\n", thread_count);
    return count;
}
/*
 * Read the grid header line from stdin and set the global grid parameters
 * NUM_BOXES, NUM_ROWS and NUM_COLS.
 *
 * The first line is expected to contain three integers:
 *   <number of grid boxes> <num_grid_rows> <num_grid_cols>
 * A leading '-' (the "-1" terminator) or a missing/empty input aborts the
 * program with a diagnostic on stderr.
 */
void readgridparam() {
    /* Assuming each line in the datafile is max MAXLEN characters. */
    char line[MAXLEN] = "";

    /* Note: the original called fflush(stdin) here; flushing an input stream
       is undefined behavior per the C standard, so the call was removed. */
    if (fgets(line, sizeof(line), stdin)) {
        /* If the first line of the file contains -1, exit. */
        if (line[0] == '-') {
            fprintf(stderr, "First line of the file contains -1. Exiting....");
            exit(EXIT_FAILURE);
        }
        else {
            /* We only expect 3 numbers in the first line:
               <number of grid boxes> <num_grid_rows> <num_grid_cols> */
            int arr[3];
            parseline(arr, line, 0);
            NUM_BOXES = arr[0];
            NUM_ROWS = arr[1];
            NUM_COLS = arr[2];
        }
    }
    else {
        fprintf(stderr, "File may not exist or is empty. Exiting....");
        exit(EXIT_FAILURE);
    }
}
void populate(Box* box_arr) {
char line1[MAXLEN] = "";
int box_count = 0;
// Read rest of file and populate the data structure
fflush(stdin);
while (fgets(line1, sizeof(line1), stdin)) {
if (line1[0] == '-') {
break;
}
else if (!strcmp(line1, "")) continue;
else if (!(line1[0] >= '0' && line1[0] <= '9')) continue;
else {
// Create new Box element
// Get Box id;
int id[1];
parseline(id, line1, 0);
box_arr[box_count].id = id[0];
// Get location, height and width
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int box_loc[4];
parseline(box_loc, line1, 0);
box_arr[box_count].up_left_y = box_loc[0];
box_arr[box_count].up_left_x = box_loc[1];
box_arr[box_count].height = box_loc[2];
box_arr[box_count].width = box_loc[3];
box_arr[box_count].perimeter = 2 * (box_arr[box_count].height + box_arr[box_count].width);
// Get top neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int top_num;
top_num = parsefirst(line1);
box_arr[box_count].num_top = top_num;
int* toparr = (int*)malloc(top_num * sizeof(int));
int* toparrov = (int*)malloc(top_num * sizeof(int));
parseline(toparr, line1, 1);
box_arr[box_count].top_ids = toparr;
box_arr[box_count].top_ov = toparrov;
if (top_num == 0) {
box_arr[box_count].top_ids = NULL;
}
// Get bottom neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int bottom_num;
bottom_num = parsefirst(line1);
box_arr[box_count].num_bottom = bottom_num;
int* bottomarr = (int*)malloc(bottom_num * sizeof(int));
int* bottomarrov = (int*)malloc(bottom_num * sizeof(int));
parseline(bottomarr, line1, 1);
box_arr[box_count].bottom_ids = bottomarr;
box_arr[box_count].bottom_ov = bottomarrov;
if (bottom_num == 0) {
box_arr[box_count].bottom_ids = NULL;
}
// Get left neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int left_num;
left_num = parsefirst(line1);
box_arr[box_count].num_left = left_num;
int* leftarr = (int*)malloc(left_num * sizeof(int));
int* leftarrov = (int*)malloc(left_num * sizeof(int));
parseline(leftarr, line1, 1);
box_arr[box_count].left_ids = leftarr;
box_arr[box_count].left_ov = leftarrov;
if (left_num == 0) {
box_arr[box_count].left_ids = NULL;
}
// Get right neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int right_num;
right_num = parsefirst(line1);
box_arr[box_count].num_right = right_num;
int* rightarr = (int*)malloc(right_num * sizeof(int));
int* rightarrov = (int*)malloc(right_num * sizeof(int));
parseline(rightarr, line1, 1);
box_arr[box_count].right_ids = rightarr;
box_arr[box_count].right_ov = rightarrov;
if (right_num == 0) {
box_arr[box_count].right_ids = NULL;
}
// Get dsv value
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
double dsv_val;
dsv_val = parsedsv(line1);
box_arr[box_count].dsv = dsv_val;
// Move to next box
box_count++;
fflush(stdin);
}
}
}
/*
 * Parse every run of decimal digits in `path` into successive slots of `num`.
 *
 * func == 0: parse all numbers on the line.
 * func == 1: skip the leading number first (it was already consumed via
 *            parsefirst), then parse the rest; used for neighbour lines.
 *
 * The caller must ensure `num` has room for every number on the line.
 *
 * Fixes vs. the original: removed the unused local `c`, hoisted the strlen
 * call out of the loop conditions (it was re-evaluated every iteration), and
 * replaced the hand-rolled digit range checks with isdigit.
 */
void parseline(int* num, char* path, int func) {
    size_t len = strlen(path);  /* hoisted: computed once, not per iteration */
    size_t i = 0;
    int num_count = 0;

    if (func == 1) {
        /* Skip the leading integer. */
        while (i < len && isdigit((unsigned char)path[i])) {
            i++;
        }
    }
    for (; i < len; i++) {
        if (isdigit((unsigned char)path[i])) {
            int number = 0;
            do {
                number = number * 10 + (path[i] - '0');
                i++;
            } while (i < len && isdigit((unsigned char)path[i]));
            num[num_count] = number;
            num_count++;
        }
    }
}
/*
 * Parse the leading non-negative integer of `path` (the neighbour count at
 * the start of a neighbour line).
 *
 * Uses strtol instead of the original unconditional do-while, which produced
 * garbage (e.g. -48 for an empty string) when the line did not start with a
 * digit; strtol returns 0 for such input.
 */
int parsefirst(char* path) {
    return (int)strtol(path, NULL, 10);
}
/*
 * Parse the leading floating-point number of `path` (a box's DSV value).
 * strtod handles signs, decimals and exponents; trailing text on the line is
 * ignored.  (The original carried a commented-out hand-rolled integer parser,
 * which has been removed.)
 */
double parsedsv(char* path) {
    return strtod(path, NULL);
}
/*
 * Length of the overlap of two 1-D intervals [a_start, a_start + a_len) and
 * [b_start, b_start + b_len): rightmost start to leftmost end.  Matches the
 * original per-direction computation exactly, including the final abs().
 */
static int interval_overlap(int a_start, int a_len, int b_start, int b_len) {
    int lo = (a_start >= b_start) ? a_start : b_start;
    int a_end = a_start + a_len;
    int b_end = b_start + b_len;
    int hi = (a_end <= b_end) ? a_end : b_end;
    return abs(hi - lo);
}

/*
 * For every box, compute the contact-edge overlap with each of its top,
 * bottom, left and right neighbours and store it in the matching *_ov array.
 * Top/bottom neighbours overlap along x (using widths); left/right along y
 * (using heights).  The four near-identical copy-pasted computations of the
 * original are factored through interval_overlap above.
 */
void calcoverlap(struct Box* box_arr) {
    for (int i = 0; i < NUM_BOXES; i++) {
        /* Top neighbours: overlap along x. */
        for (int j = 0; j < box_arr[i].num_top; j++) {
            int nb = box_arr[i].top_ids[j];
            box_arr[i].top_ov[j] = interval_overlap(box_arr[i].up_left_x, box_arr[i].width,
                                                    box_arr[nb].up_left_x, box_arr[nb].width);
        }
        /* Bottom neighbours: overlap along x. */
        for (int j = 0; j < box_arr[i].num_bottom; j++) {
            int nb = box_arr[i].bottom_ids[j];
            box_arr[i].bottom_ov[j] = interval_overlap(box_arr[i].up_left_x, box_arr[i].width,
                                                       box_arr[nb].up_left_x, box_arr[nb].width);
        }
        /* Left neighbours: overlap along y. */
        for (int j = 0; j < box_arr[i].num_left; j++) {
            int nb = box_arr[i].left_ids[j];
            box_arr[i].left_ov[j] = interval_overlap(box_arr[i].up_left_y, box_arr[i].height,
                                                     box_arr[nb].up_left_y, box_arr[nb].height);
        }
        /* Right neighbours: overlap along y. */
        for (int j = 0; j < box_arr[i].num_right; j++) {
            int nb = box_arr[i].right_ids[j];
            box_arr[i].right_ov[j] = interval_overlap(box_arr[i].up_left_y, box_arr[i].height,
                                                      box_arr[nb].up_left_y, box_arr[nb].height);
        }
    }
}
/* Dump every box — geometry, the four neighbour lists with their overlaps,
 * and the dsv value — to stdout for debugging. */
void printboxes(struct Box* box_arr) {
    for (int b = 0; b < NUM_BOXES; b++) {
        struct Box* cur = &box_arr[b];
        printf("================================");
        printf("\n\nBox id: %d\n", cur->id);
        printf("Box left_X, left_y, height, width, perimiter: %d, %d, %d, %d, %d\n", cur->up_left_x, cur->up_left_y, cur->height, cur->width, cur->perimeter);
        printf("Box top neighbours and overlap: ");
        for (int k = 0; k < cur->num_top; k++) {
            printf("%d:%d, ", cur->top_ids[k], cur->top_ov[k]);
        }
        printf("\n");
        printf("Box bottom neighbours and overlap: ");
        for (int k = 0; k < cur->num_bottom; k++) {
            printf("%d:%d, ", cur->bottom_ids[k], cur->bottom_ov[k]);
        }
        printf("\n");
        printf("Box left neighbours: ");
        for (int k = 0; k < cur->num_left; k++) {
            printf("%d:%d, ", cur->left_ids[k], cur->left_ov[k]);
        }
        printf("\n");
        printf("Box right neighbours: ");
        for (int k = 0; k < cur->num_right; k++) {
            printf("%d:%d, ", cur->right_ids[k], cur->right_ov[k]);
        }
        printf("\n");
        printf("Box dsv value: %lf", cur->dsv);
        printf("\n");
    }
}
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <dmlc/common.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "common.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
// Grid-stride CUDA kernel (dh::GridStrideRange): applies _func to every index
// in _range, forwarding the unpacked device spans to the functor.
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
                                 SpanType... _spans) {
  for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
    _func(i, _spans...);
  }
}
#endif // defined(__CUDACC__)
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
* \tparam CompiledWithCuda A bool parameter used to distinguish compilation
* trajectories, users do not need to use it.
*
* Note: Using Transform is a VERY tricky thing to do. Transform uses template
* argument to duplicate itself into two different types, one for CPU,
* another for CUDA. The trick is not without its flaw:
*
* If you use it in a function that can be compiled by both nvcc and host
* compiler, the behaviour is un-defined! Because your function is NOT
* duplicated by `CompiledWithCuda`. At link time, cuda compiler resolution
* will merge functions with same signature.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
 private:
  template <typename Functor>
  struct Evaluator {
   public:
    /*!
     * \param func   Callable invoked per index.
     * \param range  Index range the functor is applied over.
     * \param device CUDA device ordinal; a negative value selects the CPU path.
     * \param shard  Whether to move the HostDeviceVectors onto `device` first.
     */
    Evaluator(Functor func, Range range, int device, bool shard) :
        func_(func), range_{std::move(range)},
        shard_{shard},
        device_{device} {}

    /*!
     * \brief Evaluate the functor with input pointers to HostDeviceVector.
     *
     * \tparam HDV... HostDeviceVectors type.
     * \param vectors Pointers to HostDeviceVector.
     */
    template <typename... HDV>
    void Eval(HDV... vectors) const {
      // Non-negative device ordinal means "run on that CUDA device".
      bool on_device = device_ >= 0;
      if (on_device) {
        LaunchCUDA(func_, vectors...);
      } else {
        LaunchCPU(func_, vectors...);
      }
    }

   private:
    // CUDA UnpackHDV: expose the device-side view of a HostDeviceVector.
    template <typename T>
    Span<T> UnpackHDVOnDevice(HostDeviceVector<T>* _vec) const {
      auto span = _vec->DeviceSpan();
      return span;
    }
    template <typename T>
    Span<T const> UnpackHDVOnDevice(const HostDeviceVector<T>* _vec) const {
      auto span = _vec->ConstDeviceSpan();
      return span;
    }
    // CPU UnpackHDV: wrap the host pointer and size in a Span.
    template <typename T>
    Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
      return Span<T> {_vec->HostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    template <typename T>
    Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
      return Span<T const> {_vec->ConstHostPointer(),
            static_cast<typename Span<T>::index_type>(_vec->Size())};
    }
    // Recursive unpack for Shard: assign each vector to the target device.
    template <typename T>
    void UnpackShard(int device, const HostDeviceVector<T> *vector) const {
      vector->SetDevice(device);
    }
    template <typename Head, typename... Rest>
    void UnpackShard(int device,
                     const HostDeviceVector<Head> *_vector,
                     const HostDeviceVector<Rest> *... _vectors) const {
      _vector->SetDevice(device);
      UnpackShard(device, _vectors...);
    }

#if defined(__CUDACC__)
    // Real CUDA launch; only instantiated when CompiledWithCuda is true.
    template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      if (shard_)
        UnpackShard(device_, _vectors...);

      size_t range_size = *range_.end() - *range_.begin();

      // Extract index to deal with possible old OpenMP.
      // This deals with situation like multi-class setting where
      // granularity is used in data vector.
      size_t shard_size = range_size;
      Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
      dh::safe_cuda(cudaSetDevice(device_));
      const int GRID_SIZE =
          static_cast<int>(DivRoundUp(*(range_.end()), kBlockThreads));
      detail::LaunchCUDAKernel<<<GRID_SIZE, kBlockThreads>>>(
          _func, shard_range, UnpackHDVOnDevice(_vectors)...);
    }
#else
    /*! \brief Dummy funtion defined when compiling for CPU. */
    template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
              typename... HDV>
    void LaunchCUDA(Functor _func, HDV*... _vectors) const {
      LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
    }
#endif  // defined(__CUDACC__)

    // Host path: apply the functor over the range with OpenMP.  Exceptions
    // thrown inside the parallel region are captured by dmlc::OMPException
    // and rethrown after the loop.
    template <typename... HDV>
    void LaunchCPU(Functor func, HDV*... vectors) const {
      omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
      dmlc::OMPException omp_exc;
#pragma omp parallel for schedule(static)
      for (omp_ulong idx = 0; idx < end; ++idx) {
        omp_exc.Run(func, idx, UnpackHDV(vectors)...);
      }
      omp_exc.Rethrow();
    }

   private:
    /*! \brief Callable object. */
    Functor func_;
    /*! \brief Range object specifying parallel threads index range. */
    Range range_;
    /*! \brief Whether sharding for vectors is required. */
    bool shard_;
    int device_;  // CUDA device ordinal; negative selects the CPU path.
  };

 public:
  /*!
   * \brief Initialize a Transform object.
   *
   * \tparam Functor A callable object type.
   * \return A Evaluator having one method Eval.
   *
   * \param func A callable object, accepting a size_t thread index,
   *           followed by a set of Span classes.
   * \param range Range object specifying parallel threads index range.
   * \param devices GPUSet specifying GPUs to use, when compiling for CPU,
   *           this should be GPUSet::Empty().
   * \param shard Whether Shard for HostDeviceVector is needed.
   */
  template <typename Functor>
  static Evaluator<Functor> Init(Functor func, Range const range,
                                 int device,
                                 bool const shard = true) {
    return Evaluator<Functor> {func, std::move(range), device, shard};
  }
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
GB_binop__isge_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_int64
// A.*B function (eWiseMult): GB_AemultB__isge_int64
// A*D function (colscale): GB_AxD__isge_int64
// D*A function (rowscale): GB_DxB__isge_int64
// C+=B function (dense accum): GB_Cdense_accumB__isge_int64
// C+=b function (dense accum): GB_Cdense_accumb__isge_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_int64
// C=scalar+B GB_bind1st__isge_int64
// C=scalar+B' GB_bind1st_tran__isge_int64
// C=A+scalar GB_bind2nd__isge_int64
// C=A'+scalar GB_bind2nd_tran__isge_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT64 || GxB_NO_ISGE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense.  The typed loop body comes from the
// shared template, specialized through the GB_* macros defined above.
// (Auto-generated file: edits limited to comments.)
GrB_Info GB_Cdense_ewise3_noaccum__isge_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using the
// precomputed ek-slice task partition (kfirst/klast/pstart arrays).
// (Auto-generated file: edits limited to comments.)
GrB_Info GB_Cdense_accumB__isge_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// (Auto-generated file: edits limited to comments.)
GrB_Info GB_Cdense_accumb__isge_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (ISGE, int64_t).
GrB_Info GB_AxD__isge_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C's value array; the template fills it in
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (ISGE, int64_t).
GrB_Info GB_DxB__isge_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C's value array; the template fills it in
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the ek_slice workspaces for M, A, and B; the
// template below relies on it for cleanup on both success and error paths.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the ISGE operator on int64_t.
GrB_Info GB_AaddB__isge_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace; presumably allocated inside the template and
// released via GB_FREE_ALL below — confirm against GB_add_template.c
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the ISGE operator on int64_t.
// Uses the GB_FREE_ALL macro defined above (same slice workspace scheme).
GrB_Info GB_AemultB__isge_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspace; released by GB_FREE_ALL after the template runs
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x >= Bx [k]) for every entry present in B's bitmap Bb.
// The scalar x is bound as the first operand; Cx and Bx may alias.
GrB_Info GB_bind1st__isge_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typecast the I/O arrays and fetch the bound scalar
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only act on entries present in the bitmap
if (GBB (Bb, k))
{
int64_t bkj = Bx [k] ;
Cx [k] = (x >= bkj) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] >= y) for every entry present in A's bitmap Ab.
// The scalar y is bound as the second operand; Cx and Ax may alias.
GrB_Info GB_bind2nd__isge_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typecast the I/O arrays and fetch the bound scalar
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only act on entries present in the bitmap
if (GBB (Ab, k))
{
int64_t akj = Ax [k] ;
Cx [k] = (akj >= y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it applies
// cij = (x >= aij) while transposing (no typecast despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the bound-first ISGE operator.
GrB_Info GB_bind1st_tran__isge_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it applies
// cij = (aij >= y) while transposing (no typecast despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the bound-second ISGE operator.
GrB_Info GB_bind2nd_tran__isge_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
// Monte-Carlo estimate of pi: sample points in [-1,1]^2 and count the
// fraction landing inside the unit circle. Thread count comes from argv[1].
int main(int argc, char **argv) {
// Q2b: get the number of threads to run with from argv and
// set the OpenMP thread count accordingly
// BUGFIX: the original compared the pointer argv to 1 and read argv[0]
// (the program name) instead of argv[1], and always set 10 threads.
if (argc < 2) {
fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
exit(1);
}
int Nthreads = atoi(argv[1]);
if (Nthreads < 1) Nthreads = 1;
omp_set_num_threads(Nthreads);
// one reentrant RNG state per thread
// BUGFIX: the original allocated space for 1 state but every thread
// wrote drandData+rank in the parallel region -> out-of-bounds writes.
struct drand48_data *drandData =
malloc(Nthreads*sizeof(struct drand48_data));
if (drandData == NULL) {
exit(1);
}
// Q2c: each thread initializes its own entry in drandData using
// srand48_r with a seed based on its thread number
#pragma omp parallel
{
int rank = omp_get_thread_num();
srand48_r((long int) rank, drandData+rank);
}
long long int Ntrials = 10000000;
//need running tallies
long long int Ntotal=0;
long long int Ncircle=0;
for (long long int n=0; n<Ntrials; n++) {
double rand1;
double rand2;
//generate two random numbers (use the thread id to offset drandData)
drand48_r(drandData+0, &rand1);
drand48_r(drandData+0, &rand2);
double x = -1 + 2*rand1; //shift to [-1,1]
double y = -1 + 2*rand2;
//check if its in the circle (no sqrt needed: r<=1 iff r^2<=1)
if (x*x+y*y<=1.0) Ncircle++;
Ntotal++;
if (n%100 ==0) {
// BUGFIX: dividing by n here was 0/0 at the first iteration;
// Ntotal is the actual number of samples drawn so far
double pi = 4.0*Ncircle/ (double) (Ntotal);
printf("Our estimate of pi is %g \n", pi);
}
}
double pi = 4.0*Ncircle/ (double) (Ntotal);
printf("Our final estimate of pi is %g \n", pi);
free(drandData);
return 0;
}
|
meanshift_static.h | #ifndef MEAN_SHIFT_STATIC_H
#define MEAN_SHIFT_STATIC_H
#include <algorithm>
#include <cmath>
#include "container.h"
#include "container_io.h"
#include <iostream>
#include "utils.h"
namespace mean_shift {
namespace omp {
// Mean-shift clustering with early termination: iterate until every point
// has moved less than eps (or niter iterations elapse), collecting a
// centroid for each converged point. Returns the centroids found so far.
template <typename T, const size_t N, const size_t D>
std::vector<vec<T, D>> cluster_points(mat<T, N, D>& data,
const size_t niter,
const float bandwidth,
const float radius,
const float min_distance,
const double eps,
const size_t num_threads) {
// denominator of the Gaussian kernel exponent: 2 * bandwidth^2
const float double_sqr_bdw = 2 * bandwidth * bandwidth;
vec<bool, N> has_stopped {false};
std::vector<vec<T, D>> centroids;
mat<T, N, D> new_data;
for (size_t i = 0; i < niter; ++i) {
#pragma omp parallel for default(none) \
shared(data, niter, bandwidth, eps, radius, double_sqr_bdw, has_stopped, centroids, new_data, min_distance) \
schedule(static) num_threads(num_threads)
for (size_t p = 0; p < N; ++p) {
// converged points only register themselves as centroids
// NOTE(review): this runs on every later iteration for a stopped
// point; it relies on is_centroid/min_distance rejecting
// duplicates — confirm is_centroid's contract
if (has_stopped[p]) {
#pragma omp critical
{
if ((centroids.size() == 0) || (is_centroid(centroids, data[p], min_distance))) {
centroids.emplace_back(data[p]);
}
}
continue;
}
// Gaussian-weighted mean of all neighbors within `radius`
vec<T, D> new_position {};
float sum_weights = 0.;
for (size_t q = 0; q < N; ++q) {
double dist = calc_distance(data[p], data[q]);
if (dist <= radius) {
float gaussian = std::exp(- dist / double_sqr_bdw);
new_position = new_position + data[q] * gaussian;
sum_weights += gaussian;
}
}
new_position = new_position / sum_weights;
// a point stops once its shift falls below eps
double shift = calc_distance(data[p], new_position);
if (shift <= eps) {
#pragma omp atomic write
has_stopped[p] = true;
}
#pragma omp critical
new_data[p] = new_position;
}
// synchronize positions only after the whole sweep
data = new_data;
if (std::all_of(has_stopped.begin(), has_stopped.end(), [](bool b) {return b;})) {
std::cout << "With eps = " << eps << " took " << i << " iterations!\n";
return centroids;
}
}
return centroids;
}
// Mean-shift clustering, fixed-iteration variant: run exactly niter sweeps
// then collapse points closer than min_distance into centroids.
template <typename T, const size_t N, const size_t D>
std::vector<vec<T, D>> cluster_points(mat<T, N, D>& data,
const size_t niter,
const float bandwidth,
const float radius,
const float min_distance,
const size_t num_threads) {
// denominator of the Gaussian kernel exponent: 2 * bandwidth^2
const float double_sqr_bdw = 2 * bandwidth * bandwidth;
mat<T, N, D> new_data;
for (size_t i = 0; i < niter; ++i) {
#pragma omp parallel for default(none) \
shared(data, niter, bandwidth, radius, double_sqr_bdw, new_data) \
schedule(static) num_threads(num_threads)
for (size_t p = 0; p < N; ++p) {
// Gaussian-weighted mean of all neighbors within `radius`
vec<T, D> new_position {};
float sum_weights = 0.;
for (size_t q = 0; q < N; ++q) {
double dist = calc_distance(data[p], data[q]);
if (dist <= radius) {
float gaussian = std::exp(- dist / double_sqr_bdw);
new_position = new_position + data[q] * gaussian;
sum_weights += gaussian;
}
}
#pragma omp critical
new_data[p] = new_position / sum_weights;
}
// synchronize positions only after the whole sweep
data = new_data;
}
return reduce_to_centroids(data, min_distance);
}
} // namespace omp
} // namespace mean_shift
#endif |
3d_array_ptr_v1.c | // PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 2 %t) %s.reference_output
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// Test case: build a 2x2x2 jagged int array via nested pointers, write to
// it from an OpenMP parallel region, print, then free everything.
int main()
{
// BUGFIX: the inner allocations used the wrong sizeof — the middle
// level holds int* elements (not int**) and the leaves hold int
// (not int*). The old sizes happened to be large enough on common
// platforms, but were semantically wrong.
int ***M = malloc(sizeof(int **) * 2);
M[0] = malloc(sizeof(int *) * 2);
M[1] = malloc(sizeof(int *) * 2);
M[0][0] = malloc(sizeof(int) * 2);
M[0][1] = malloc(sizeof(int) * 2);
M[1][0] = malloc(sizeof(int) * 2);
M[1][1] = malloc(sizeof(int) * 2);
#pragma omp parallel
{
// every thread stores the same values, then prints after the barrier
// (the duplicate writes are the point of this CATO test case)
M[0][0][0] = 42;
M[0][0][1] = 42;
M[0][1][0] = 42;
M[0][1][1] = 42;
M[1][0][0] = 46;
M[1][0][1] = 46;
M[1][1][0] = 46;
M[1][1][1] = 46;
#pragma omp barrier
printf("[\n[[%d,%d]\n[%d,%d]]\n[[%d,%d]\n[%d,%d]\n]\n", M[0][0][0],M[0][0][1],M[0][1][0],M[0][1][1],M[1][0][0],M[1][0][1],M[1][1][0],M[1][1][1]);
}
free(M[0][0]);
free(M[0][1]);
free(M[1][0]);
free(M[1][1]);
free(M[0]);
free(M[1]);
free(M);
}
|
reduce3.h | /*
* reduce3.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef REDUCE3_H_
#define REDUCE3_H_
#define EXTRA_PARAMS_LENGTH 10
#include <op.h>
#include <templatemath.h>
#include <helper_cuda.h>
#include <sharedmem.h>
#include <omp.h>
#include <pairwise_util.h>
#include <dll.h>
#include <shape.h>
#ifdef __JNI__
#include <jni.h>
#endif
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
namespace functions {
namespace reduce3 {
/**
* Reduce involving
* 2 arrays
*/
template<typename T>
class Reduce3: public virtual functions::ops::Op<T> {
public:
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) = 0;
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
T startingValue(T *input) = 0;
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
T * generateExtraParams() = 0;
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
void finalizeExtraParams(T **extraParamsRef) = 0;
/**
*
* @param d1
* @param d2
* @param extraParams
* @return
*/
//an op for the kernel
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T op(T d1, T d2, T **extraParamsRef) = 0;
//calculate an update of the reduce operation
/**
*
* @param old
* @param opOutput
* @param extraParams
* @return
*/
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T update(T old, T opOutput, T **extraParamsRef) = 0;
/**
*
* @param old
* @param opOutput
* @param extraParams
* @return
*/
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T merge(T old, T opOutput, T **extraParamsRef) = 0;
/**
*
* @param d1
* @param d2
* @param extraParams
* @return
*/
//an op for the kernel
#ifdef __CUDACC__
virtual __device__
inline T opAtomic(T d1, T d2, T **extraParamsRef) = 0;
#endif
#ifdef __CUDACC__
/**
* Aggregate shared memory
* @param sPartialsRef
* @param tid
* @param extraParams
*/
// Tree-reduce the per-thread partials in shared memory down to sPartials[0].
// numItems need not be a power of two: the remainder above the largest
// power of two is folded in first, then a standard halving reduction runs.
virtual __inline__ __device__ void aggregatePartials(T **sPartialsRef, int tid, int numItems, T **extraParamsRef) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
T *sPartials = *sPartialsRef;
int floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
// strip bits until floorPow2 is a power of two
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
// fold the excess lanes [floorPow2, numItems) into the base range
if (tid >= floorPow2) {
sPartials[tid - floorPow2] = update(sPartials[tid - floorPow2], sPartials[tid], extraParamsRef);
}
__syncthreads();
}
// classic halving tree reduction over the power-of-two range
for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads) {
sPartials[tid] = update(sPartials[tid], sPartials[tid + activeThreads], extraParamsRef);
}
__syncthreads();
}
}
/**
Perform a reduction
@param n the number of elements
@param xOffset the starting offset
@param dx the data to perform the reduction on
@param incx the increment on which to perform the reduction
@param extraParams extra parameters used for calculations
@param result where to store the result of the reduction
*/
// Pairwise reduction of dx and dy (no usable element-wise stride): each
// element's offset is recomputed from its multi-index, partials are
// tree-reduced in shared memory, and block 0..gridDim write per-block
// results. postProcessOrNot selects whether postProcess is applied.
virtual __inline__ __device__ void transformNoElementWiseStride(
T *dx,
int *xShapeInfo,
T *dy,
int *yShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) {
Nd4jIndex n = shape::length(xShapeInfo);
int rank = shape::rank(xShapeInfo);
//shared memory space for storing intermediate results
T *sPartials = (T *) manager->getSharedReductionBuffer();
T startingVal = this->startingValue(dx);
sPartials[threadIdx.x] = startingVal;
int idx[MAX_RANK];
// BUGFIX: the grid-stride loop must start at
// blockIdx.x * blockDim.x + threadIdx.x; the original multiplied by
// gridDim.x, which skips and/or double-counts elements.
#pragma unroll
for(Nd4jIndex i = blockIdx.x * blockDim.x + threadIdx.x;i < n; i += gridDim.x * blockDim.x) {
shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i, idx);
Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),idx,rank);
Nd4jIndex yOffset = shape::getOffset(0,shape::shapeOf(yShapeInfo),shape::stride(yShapeInfo),idx,rank);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], &extraParams),&extraParams);
}
// tree-reduce this block's partials down to sPartials[0]
T **sPartialsRef = (T **) &sPartials;
aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), &extraParams);
// write result for this block to global mem
if (threadIdx.x == 0) {
if (postProcessOrNot) {
result[blockIdx.x] = postProcess(sPartials[0], n,&extraParams);
}
else {
result[blockIdx.x] = sPartials[0];
}
}
// one thread per block releases the extra-params workspace
if(threadIdx.x == 0 && this->extraParamsLength() > 0)
this->finalizeExtraParams(&extraParams);
}
/**
*
*/
virtual __inline__ __device__ void execScalarCuda(
T *dx,
int *xShapeInfo,
T *dy,
int *yShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo, int *allocationBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) {
// SharedMemory <T> val;
T *sPartials = (T *) manager->getSharedReductionBuffer(); // val.getPointer();
T startingVal = this->startingValue(dx);
Nd4jIndex length = shape::length(xShapeInfo);
int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
int yElementWiseStride = shape::elementWiseStride(yShapeInfo);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
char xOrder = shape::order(xShapeInfo);
char yOrder = shape::order(yShapeInfo);
if(xOrder == yOrder) {
if (xElementWiseStride == 1 && yElementWiseStride == 1) {
for(Nd4jIndex i = threadIdx.x; i < length; i+= gridDim.x * blockDim.x) {
startingVal = update(startingVal, this->opAtomic(dx[i], dy[i], &extraParams), &extraParams);
}
}
else {
for(int i = threadIdx.x; i < length; i+= gridDim.x * blockDim.x) {
startingVal = update(startingVal, this->opAtomic(dx[i * xElementWiseStride], dy[i * yElementWiseStride], &extraParams), &extraParams);
}
}
sPartials[tid] = startingVal;
__syncthreads();
T **sPartialsRef = (T **) &sPartials;
aggregatePartials(sPartialsRef, tid, nd4j::math::nd4j_min<int>(blockDim.x, length), &extraParams);
/**
* Look at something that uses the extra params
* and aggregates the extra values properly.
*This will be used in summary stats too.
*/
// write result for this block to global mem
__syncthreads();
if (tid == 0) {
result[0] = postProcess(sPartials[0], length,&extraParams);
}
}
else {
int *xShape = shape::shapeOf(xShapeInfo);
int *xStride = shape::stride(xShapeInfo);
int *yStride = shape::stride(yShapeInfo);
T startingVal = this->startingValue(dx);
int n = shape::length(xShapeInfo);
//SharedMemory <T> val;
T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer();
Nd4jIndex length = shape::length(xShapeInfo);
int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
int yElementWiseStride = shape::elementWiseStride(yShapeInfo);
char xOrder = shape::order(xShapeInfo);
char yOrder = shape::order(yShapeInfo);
//int *idx = (int *) malloc(sizeof(int) * shape::rank(xShapeInfo));
int rank = shape::rank(xShapeInfo);
/*
long allocSize = sizeof(int) * rank;
int *idx = shape::cuMalloc(allocationBuffer, allocSize, manager);
*/
int idx[MAX_RANK];
//shared memory space for storing intermediate results
sPartials[threadIdx.x] = startingVal;
#pragma unroll
for(unsigned int i = tid ;i < n; i += gridDim.x * blockDim.x) {
shape::ind2sub(rank,shape::shapeOf(xShapeInfo),i,idx);
Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),idx,rank);
Nd4jIndex yOffset = shape::getOffset(0,shape::shapeOf(yShapeInfo),shape::stride(yShapeInfo),idx,rank);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], &extraParams),&extraParams);
}
/*
if (rank > MAX_COORD && tid * allocSize > PREALLOC_SIZE - allocSize) {
free(idx);
}
*/
T **sPartialsRef = (T **) &sPartials;
aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, length), &extraParams);
/**
* Look at something that uses the extra params
* and aggregates the extra values propelry.
*This will be used in summary stats too.
*/
// write result for this block to global mem
__syncthreads();
if (tid == 0) {
result[tid] = postProcess(sPartials[0], n,&extraParams);
}
}
}
/**
Perform a reduction
@param n the number of elements
@param xOffset the starting offset
@param dx the data to perform the reduction on
@param incx the increment on which to perform the reduction
@param extraParams extra parameters used for calculations
@param result where to store the result of the reduction
*/
virtual __inline__ __device__ void transform(
T *dx,
int *xShapeInfo,
T *dy,
int *yShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) {
/**
* Gpu information for the problem
*/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int resultScalar;
__shared__ int xElementWiseStride;
__shared__ int yElementWiseStride;
//shared memory space for storing intermediate results
//SharedMemory <T> val;
T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer();
T init = this->startingValue(dx);
sPartials[threadIdx.x] = init;
//length for the tad
__shared__ Nd4jIndex resultLength;
T reduction = this->startingValue(dx);
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
/**
* The element wise stride belong longs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along long arr
* we can use arr.stride(1) as a representation
* along long which to iterate.
*/
int *xStride = shape::stride(xShapeInfo);
char xOrder = shape::order(xShapeInfo);
xElementWiseStride = shape::elementWiseStride(xShapeInfo);
yElementWiseStride = shape::elementWiseStride(yShapeInfo);
//printf("Order is: [%c], stride is: xElementStride: [%i], passed strides are: [%i], dimension: [%i], dimensionLength: [%i]\n", xOrder, xElementWiseStride, xStride[0], dimension[0], dimensionLength);
}
__syncthreads();
if (!resultScalar) {
__shared__ shape::TAD *tad;
if (threadIdx.x == 0) {
tad = new(manager->getTADSpace()) shape::TAD(); //(xShapeInfo,dimension,dimensionLength)
tad->setExternalBuffers((void *) manager);
tad->initWithExternalTAD(tadOnlyShapeInfo, xShapeInfo, dimension, dimensionLength);
//tad->init(xShapeInfo,dimension,dimensionLength);
//tad->createTadOnlyShapeInfo();
}
__syncthreads();
if(dimensionLength > 1) {
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
//decompose in to several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
int *xShape = shape::shapeOf(tad->tadOnlyShapeInfo);
int *xStride = shape::stride(tad->tadOnlyShapeInfo);
Nd4jIndex tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
int rank = shape::rank(tad->tadOnlyShapeInfo);
#pragma unroll
for(Nd4jIndex i = tid; i < resultLength; i+= gridDim.x * blockDim.x) {
int offset = tad->tadOffset(i);
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int xStridesIter[MAX_RANK];
int yStridesIter[MAX_RANK];
T *xPointer = dx + offset;
T start = this->startingValue(xPointer);
int *xShape = shape::shapeOf(xShapeInfo);
int *xStride = shape::stride(xShapeInfo);
int *yStride = shape::stride(yShapeInfo);
T startingVal = this->startingValue(dx);
Nd4jIndex n = shape::length(xShapeInfo);
int rank = shape::rank(xShapeInfo);
if(PrepareTwoRawArrayIter<T>(rank,
xShape,
dx,
xStride,
dy,
yStride,
&rank,
shapeIter,
&dx,
xStridesIter,
&dy,
yStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
/* Process the innermost dimension */
T *xIter = dx;
T *yIter = dy;
startingVal = update(startingVal, op(xIter[0],yIter[0],&extraParams),&extraParams);
} ND4J_RAW_ITER_TWO_NEXT(dim,
rank,
coord,
shapeIter,
dx,
xStridesIter,
dy,
yStridesIter);
result[i] = postProcess(startingVal,n,&extraParams);
}
else {
printf("Unable to prepare array\n");
}
}
__syncthreads();
}
else {
/**
* The element wise stride belong longs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along long arr
* we can use arr.stride(1) as a representation
* along long which to iterate.
*/
Nd4jIndex xLength = shape::length(xShapeInfo);
Nd4jIndex tadLength = xLength / resultLength;
Nd4jIndex i = 0,j = 0;
/*
for (int r = blockIdx.x; r < tad->numTads; r += gridDim.x) {
if (threadIdx.x == 0)
tad->createOffsetForBlock(r);
__syncthreads();
int tadOffsetForBlock = tad->tadOffsetForBlock;
T *xVal = dx + tadOffsetForBlock;
sPartials[threadIdx.x] = this->startingValue(xVal);
for(int i = threadIdx.x; i < tad->tadLength; i+= blockDim.x) {
int xOffsetForTad = shape::tadOffset(i, xShapeInfo, dimension, dimensionLength, nullptr);
int yOffsetForTad = shape::tadOffset(i, yShapeInfo, dimension, dimensionLength, nullptr);
sPartials[threadIdx.x] = this->update(sPartials[threadIdx.x],dx[tadOffsetForBlock + i * tad->tadElementWiseStride], extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
T **sPartialsRef = (T **) &sPartials;
aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tad->tadLength), extraParams);
__syncthreads();
if (threadIdx.x == 0)
result[r] = this->postProcess(sPartials[threadIdx.x], tad->tadLength, extraParams);
}
*/
for(i = tid; i < resultLength; i+= blockDim.x * gridDim.x) {
int xOffsetForTad = tad->tadOffset(i);
int yOffsetForTad = xOffsetForTad;//tad->tadOffset(i);
//int xOffsetForTad = shape::tadOffset(i, xShapeInfo, dimension, dimensionLength, nullptr);
//int yOffsetForTad = shape::tadOffset(i, yShapeInfo, dimension, dimensionLength, nullptr);
sPartials[tid] = op(dx[xOffsetForTad],dy[yOffsetForTad], &extraParams);
for(j = 1; j < tadLength; j++) {
sPartials[i] = update(sPartials[i],op(dx[xOffsetForTad + xElementWiseStride * j],dy[yOffsetForTad + yElementWiseStride * j], &extraParams), &extraParams);
}
// printf("Updating result: [%i] -> [%f]\n", i, sPartials[i]);
result[i] = postProcess(sPartials[i],tadLength,&extraParams);
}
}
}
else {
printf("shifting to execScalarCuda\n");
/*
this->execScalarCuda(
dx,
xShapeInfo,
dy,
yShapeInfo,
extraParams,
result,
resultShapeInfo, allocationPointer, manager, tadOnlyShapeInfo);
*/
}
}
#endif
/**
*
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
#ifdef __CUDACC__
__host__
#endif
// Host-side scalar reduction over x and y. Fast paths handle matching
// orders (unit or constant strides); otherwise a raw two-array iterator
// walks both buffers in tandem. Returns the post-processed reduction.
// NOTE(review): the accumulation is order-sensitive (update chains
// sequentially), so this code is left byte-identical.
T execScalar(
T *x,
int *xShapeInfo,
T *extraParamsVals,
T *y,
int *yShapeInfo) {
T startingVal = this->startingValue(x);
Nd4jIndex length = shape::length(xShapeInfo);
int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
int yElementWiseStride = shape::elementWiseStride(yShapeInfo);
// seed the extra-params workspace with the identity value
#pragma omp parallel for simd
for(int i = 0; i < this->extraParamsLength();i++) {
extraParamsVals[i] = startingVal;
}
char xOrder = shape::order(xShapeInfo);
char yOrder = shape::order(yShapeInfo);
if(xOrder == yOrder) {
if (xElementWiseStride == 1 && yElementWiseStride == 1) {
// contiguous fast path
#pragma omp simd
for(int i = 0; i < length; i++) {
startingVal = update(startingVal,op(x[i],y[i],&extraParamsVals),&extraParamsVals);
}
return postProcess(startingVal, length,&(extraParamsVals));
}
else {
// constant-stride fast path
#pragma omp simd
for(Nd4jIndex i = 0; i < length; i++) {
startingVal = update(startingVal,op(x[i * xElementWiseStride],y[i * yElementWiseStride],&extraParamsVals),&extraParamsVals);
}
return postProcess(startingVal, length,&(extraParamsVals));
}
}
else {
// mismatched orders: drive both arrays with a raw pairwise iterator
int *xShape = shape::shapeOf(xShapeInfo);
int *xStride = shape::stride(xShapeInfo);
int *yStride = shape::stride(yShapeInfo);
T startingVal = this->startingValue(x);
Nd4jIndex n = shape::length(xShapeInfo);
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int xStridesIter[MAX_RANK];
int yStridesIter[MAX_RANK];
int rank = shape::rank(xShapeInfo);
if(PrepareTwoRawArrayIter<T>(rank,
xShape,
x,
xStride,
y,
yStride,
&rank,
shapeIter,
&x,
xStridesIter,
&y,
yStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
/* Process the innermost dimension */
T *xIter = x;
T *yIter = y;
startingVal = update(startingVal, op(xIter[0],yIter[0],&extraParamsVals),&extraParamsVals);
} ND4J_RAW_ITER_TWO_NEXT(dim,
rank,
coord,
shapeIter,
x,
xStridesIter,
y,
yStridesIter);
return postProcess(startingVal,n,&extraParamsVals);
}
else {
// iterator setup failed; fall through and return the identity value
printf("Unable to prepare array\n");
}
}
return startingVal;
}
/**
*
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
#ifdef __CUDACC__
__host__
#endif
// Convenience overload: run the scalar reduction and store it in result[0].
// resultShapeIfo is accepted for interface symmetry but not used.
void execScalar(
T *x,
int *xShapeInfo,
T *extraParamsVals,
T *y,
int *yShapeInfo,
T *result,
int *resultShapeIfo) {
T reduced = execScalar(x, xShapeInfo, extraParamsVals, y, yShapeInfo);
result[0] = reduced;
}
/**
*
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
#ifdef __CUDACC__
__host__
#endif
// Whole-array reduction entry point: delegates to the void execScalar
// overload, which writes the scalar result into result[0].
void exec(
T *x,
int *xShapeInfo,
T *extraParamsVals,
T *y, int *yShapeInfo,
T *result, int *resultShapeInfo) {
execScalar(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfo);
}
/**
*
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
// Dimension-wise reduction: reduce x and y along the given dimensions,
// writing one value per TAD into result. Falls back to the scalar path
// when the result is a scalar; mismatched orders use a raw iterator,
// otherwise TADs are processed in parallel.
void exec(T *x, int *xShapeInfo,
T *extraParamsVals,
T *y,
int *yShapeInfo,
T *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength) {
if(shape::isScalar(resultShapeInfoBuffer)) {
// degenerate case: the whole array reduces to one value
execScalar(
x,
xShapeInfo,
extraParamsVals,
y,
yShapeInfo,
result,
resultShapeInfoBuffer);
return;
}
char xOrder = shape::order(xShapeInfo);
char yOrder = shape::order(yShapeInfo);
if(xOrder != yOrder) {
// mismatched orders: walk both arrays with a raw pairwise iterator
// and scatter each element into its reduction slot
int shapeIter[MAX_RANK];
int coord[MAX_RANK];
int dim;
int xStridesIter[MAX_RANK];
int yStridesIter[MAX_RANK];
int *xShape = shape::shapeOf(xShapeInfo);
int *xStride = shape::stride(xShapeInfo);
int *yStride = shape::stride(yShapeInfo);
int rank = shape::rank(xShapeInfo);
if(PrepareTwoRawArrayIter<T>(rank,
xShape,
x,
xStride,
y,
yStride,
&rank,
shapeIter,
&x,
xStridesIter,
&y,
yStridesIter) >= 0) {
Nd4jIndex resultLength = shape::length(resultShapeInfoBuffer);
Nd4jIndex tadLength = shape::tadLength(xShapeInfo,dimension,dimensionLength);
ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
/* Process the innermost dimension */
T *xIter = x;
T *yIter = y;
// NOTE(review): reductionIndex = xOffset / resultLength maps
// offsets to result slots — confirm this matches the TAD layout
Nd4jIndex xOffset = shape::getOffset(0,xShape,xStride,coord,rank);
int reductionIndex = xOffset / resultLength;
result[reductionIndex] = update(result[reductionIndex],op(xIter[0],yIter[0],&extraParamsVals),&extraParamsVals);
} ND4J_RAW_ITER_TWO_NEXT(dim,
rank,
coord,
shapeIter,
x,
xStridesIter,
y,
yStridesIter);
// finalize every reduction slot in parallel
#pragma omp parallel for
for(Nd4jIndex i = 0; i < resultLength ;i++) {
result[i] = postProcess(result[i],tadLength,&extraParamsVals);
}
}
else {
printf("Unable to prepare array\n");
}
}
else {
T startingVal = this->startingValue(x);
Nd4jIndex resultLength = shape::length(resultShapeInfoBuffer);
// NOTE(review): xTad is constructed from yShapeInfo — looks like it
// should be xShapeInfo (or the variable is misnamed); verify intent
shape::TAD xTad(yShapeInfo,dimension,dimensionLength);
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
/**
* The element wise stride belong longs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along long arr
* we can use arr.stride(1) as a representation
* along long which to iterate.
*/
int tadElementWiseStride = shape::elementWiseStride(xTad.tadOnlyShapeInfo);
int tadLength = shape::length(xTad.tadOnlyShapeInfo);
// one TAD per result slot; each thread owns its own extra params
#pragma omp parallel for
for(Nd4jIndex i = 0; i < resultLength; i++) {
T *localExtraParams = nullptr;
if(this->extraParamsLength() > 0)
localExtraParams = new T[this->extraParamsLength()];
for(int extraParamsIdx = 0; extraParamsIdx < this->extraParamsLength(); extraParamsIdx++) {
localExtraParams[extraParamsIdx] = startingVal;
}
Nd4jIndex offset = xTad.tadOffsets[i];
result[i] = op(x[offset], y[offset],&localExtraParams);
for(int j = 1; j < tadLength; j++) {
result[i] = update(result[i],op(x[offset + tadElementWiseStride * j],y[offset + tadElementWiseStride * j], &localExtraParams), &localExtraParams);
}
result[i] = postProcess(result[i],tadLength,&localExtraParams);
if(localExtraParams != nullptr)
delete[] localExtraParams;
}
}
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Virtual destructor: concrete reduce3 ops are deleted through base pointers.
virtual ~Reduce3() {
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Default constructor; subclasses configure any extra-parameter state.
Reduce3() {
}
};
namespace ops {
/**
 * Cosine similarity between 2 arrays:
 * dot(x, y) / (||x|| * ||y||).
 * The running sums of squares of each input are carried in
 * extraParams[0] (for x) and extraParams[1] (for y); the reduction
 * value itself accumulates the dot product.
 */
template<typename T>
class CosineSimilarity: public virtual Reduce3<T> {
public:
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Allocates the two squared-norm accumulators (left uninitialized here;
// the framework seeds them with startingValue()).  Release the buffer
// via finalizeExtraParams().
T * generateExtraParams() {
T *extraParams = new T[2];
return extraParams;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Frees the buffer returned by generateExtraParams().
void finalizeExtraParams(T **extraParams) {
delete[] *extraParams;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Identity value for the running dot-product sum.
T startingValue(T *input) {
return 0.0;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Final step: divide the accumulated dot product by the product of the
// two vector norms.
inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) {
T *extraParams = *extraParamsRef;
return reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1]));
}
/**
 * Pairwise op: accumulates d1*d1 and d2*d2 into the extra params and
 * returns the dot-product contribution.
 *
 * @param d1 element from the first array
 * @param d2 element from the second array
 * @param extraParamsRef pointer to the two squared-norm accumulators
 * @return d1 * d2
 */
//an op for the kernel
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T op(T d1, T d2, T **extraParamsRef) {
T *extraParams = *extraParamsRef;
extraParams[0] += d1 * d1;
extraParams[1] += d2 * d2;
return (d1 * d2);
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Folds one worker's squared-norm accumulators into the totals.
void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
T *extraParamsTotalRef = *extraParamsTotal;
T *extraParamsLocalRef = *extraParamsLocal;
extraParamsTotalRef[0] += extraParamsLocalRef[0];
extraParamsTotalRef[1] += extraParamsLocalRef[1];
}
/**
 * Device-side variant of op(): same math, but the shared accumulators
 * are updated with atomics because multiple CUDA threads write them.
 *
 * @param d1 element from the first array
 * @param d2 element from the second array
 * @param extraParamsRef pointer to the shared accumulators
 * @return d1 * d2
 */
//an op for the kernel
#ifdef __CUDACC__
virtual __device__
inline T opAtomic(T d1, T d2, T **extraParamsRef) {
T *extraParams = *extraParamsRef;
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],d1 * d1);
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],d2 * d2);
return (d1 * d2);
}
#endif
//calculate an update of the reduce operation
/**
 * Combines a new pairwise result into the running dot-product sum.
 *
 * @param old running reduction value
 * @param opOutput newest op() result
 * @param extraParamsRef unused here
 * @return old + opOutput
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T update(T old, T opOutput, T **extraParamsRef) {
return old + opOutput;
}
/**
 * Merges two partial reductions; for a plain sum this is update().
 *
 * @param old one partial reduction
 * @param opOutput another partial reduction
 * @param extraParamsRef forwarded to update()
 * @return combined partial reduction
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T merge(T old, T opOutput, T **extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
#ifdef __CUDACC__
__host__ __device__
#endif
virtual ~CosineSimilarity() {
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Cosine similarity carries two extra accumulators (the squared norms).
CosineSimilarity() {
this->extraParamsLen = 2;
}
};
/**
 * Dot product between 2 arrays:
 * sum over i of x[i] * y[i].  Needs no extra parameters.
 */
template<typename T>
class Dot: public virtual Reduce3<T> {
public:
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Dot product needs no auxiliary accumulators.
T * generateExtraParams() {
return nullptr;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Effectively a no-op: generateExtraParams() returned nullptr, and
// delete[] on a null pointer does nothing.
void finalizeExtraParams(T **extraParamsRef) {
delete[] *extraParamsRef;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Identity value for a sum reduction.
T startingValue(T *input) {
return 0.0;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// No final transform for a dot product.
inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) {
return reduction;
}
/**
 * Pairwise op: the product of the two elements.
 *
 * @param d1 element from the first array
 * @param d2 element from the second array
 * @param extraParamsRef unused
 * @return d1 * d2
 */
//an op for the kernel
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T op(T d1, T d2, T **extraParamsRef) {
return d1 * d2;
}
/**
 * Device-side variant: no shared state to protect, so it simply
 * forwards to op().
 */
//an op for the kernel
#ifdef __CUDACC__
virtual
__device__
inline T opAtomic(T d1, T d2, T **extraParamsRef) {
return op(d1,d2,extraParamsRef);
}
#endif
//calculate an update of the reduce operation
/**
 * Adds a new pairwise product into the running sum.
 *
 * @param old running reduction value
 * @param opOutput newest op() result
 * @param extraParamsRef unused
 * @return opOutput + old
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T update(T old, T opOutput, T **extraParamsRef) {
return opOutput + old;
}
/**
 * Merges two partial sums; identical to update().
 *
 * @param old one partial reduction
 * @param opOutput another partial reduction
 * @param extraParamsRef forwarded to update()
 * @return combined partial reduction
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T merge(T old, T opOutput, T **extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
#ifdef __CUDACC__
__host__ __device__
#endif
void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
//no extra params aggregation needs to happen
}
#ifdef __CUDACC__
__host__ __device__
#endif
virtual ~Dot() {
}
#ifdef __CUDACC__
__host__ __device__
#endif
Dot() {
}
};
/**
 * Euclidean distance between 2 arrays:
 * sqrt(sum over i of (x[i] - y[i])^2).  Needs no extra parameters.
 */
template<typename T>
class EuclideanDistance: public virtual Reduce3<T> {
public:
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Euclidean distance needs no auxiliary accumulators.
T * generateExtraParams() {
return nullptr;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Effectively a no-op: generateExtraParams() returned nullptr, and
// delete[] on a null pointer does nothing.
void finalizeExtraParams(T **extraParamsRef) {
delete[] *extraParamsRef;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Identity value for the running sum of squared differences.
T startingValue(T *input) {
return 0.0;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Final step: square root of the accumulated squared differences.
inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
/**
 * Pairwise op: the squared difference of the two elements.
 *
 * @param d1 element from the first array
 * @param d2 element from the second array
 * @param extraParamsRef unused
 * @return (d1 - d2)^2
 */
//an op for the kernel
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T op(T d1, T d2, T **extraParamsRef) {
T ret = d1 - d2;
return ret * ret;
}
/**
 * Device-side variant: no shared state to protect, so it simply
 * forwards to op().
 */
//an op for the kernel
#ifdef __CUDACC__
virtual
__device__
inline T opAtomic(T d1, T d2, T **extraParamsRef) {
return op(d1,d2,extraParamsRef);
}
#endif
//calculate an update of the reduce operation
/**
 * Adds a new squared difference into the running sum.
 *
 * @param old running reduction value
 * @param opOutput newest op() result
 * @param extraParamsRef unused
 * @return opOutput + old
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T update(T old, T opOutput, T **extraParamsRef) {
return opOutput + old;
}
/**
 * Merges two partial sums; identical to update().
 *
 * @param old one partial reduction
 * @param opOutput another partial reduction
 * @param extraParamsRef forwarded to update()
 * @return combined partial reduction
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T merge(T old, T opOutput, T **extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
#ifdef __CUDACC__
__host__ __device__
#endif
void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
//no extra params aggregation needs to happen
}
#ifdef __CUDACC__
__host__ __device__
#endif
virtual ~EuclideanDistance() {
}
#ifdef __CUDACC__
__host__ __device__
#endif
EuclideanDistance() {
}
};
/**
 * Manhattan distance between 2 arrays:
 * sum over i of |x[i] - y[i]|.  Needs no extra parameters.
 */
template<typename T>
class ManhattanDistance: public virtual Reduce3<T> {
public:
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Manhattan distance needs no auxiliary accumulators.
T * generateExtraParams() {
return nullptr;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Effectively a no-op: generateExtraParams() returned nullptr, and
// delete[] on a null pointer does nothing.
void finalizeExtraParams(T **extraParamsRef) {
delete[] *extraParamsRef;
}
virtual
#ifdef __CUDACC__
__inline__ __host__ __device__
#endif
// Identity value for the running sum of absolute differences.
T startingValue(T *input) {
return 0.0;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// No final transform for a Manhattan distance.
inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) {
return reduction;
}
/**
 * Pairwise op: the absolute difference of the two elements.
 *
 * @param d1 element from the first array
 * @param d2 element from the second array
 * @param extraParamsRef unused
 * @return |d1 - d2|
 */
//an op for the kernel
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T op(T d1, T d2, T **extraParamsRef) {
return nd4j::math::nd4j_abs<T>(d1 - d2);
}
//calculate an update of the reduce operation
/**
 * Adds a new absolute difference into the running sum.
 *
 * @param old running reduction value
 * @param opOutput newest op() result
 * @param extraParamsRef unused
 * @return old + opOutput
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T update(T old, T opOutput, T **extraParamsRef) {
return old + opOutput;
}
#ifdef __CUDACC__
__host__ __device__
#endif
void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
//no extra params aggregation needs to happen
}
/**
 * Device-side variant: no shared state to protect, so it simply
 * forwards to op().
 */
//an op for the kernel
#ifdef __CUDACC__
virtual __device__
inline T opAtomic(T d1, T d2, T **extraParamsRef) {
return op(d1,d2,extraParamsRef);
}
#endif
/**
 * Merges two partial sums; identical to update().
 *
 * @param old one partial reduction
 * @param opOutput another partial reduction
 * @param extraParamsRef forwarded to update()
 * @return combined partial reduction
 */
virtual
#ifdef __CUDACC__
__host__ __device__
#endif
inline T merge(T old, T opOutput, T **extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
#ifdef __CUDACC__
__host__ __device__
#endif
virtual ~ManhattanDistance() {
}
#ifdef __CUDACC__
__host__ __device__
#endif
ManhattanDistance() {
}
};
}
template<typename T>
class Reduce3OpFactory {
public:
#ifdef __CUDACC__
    __host__ __device__
#endif
    Reduce3OpFactory() {
    }

    /**
     * Build the reduce3 op matching an op number.
     *
     * Op numbers:
     *   0: manhattan distance
     *   1: euclidean distance
     *   2: cosine similarity
     *   3: dot product
     * Any other number yields nullptr.  On CUDA the op is
     * placement-new'ed into the caller-supplied buffer rather than
     * heap-allocated.
     */
#ifdef __CUDACC__
    __inline__ __device__
    Reduce3<T> * getOp(int op, unsigned char *buffer) {
        switch (op) {
            case 0: return new(buffer) functions::reduce3::ops::ManhattanDistance<T>();
            case 1: return new(buffer) functions::reduce3::ops::EuclideanDistance<T>();
            case 2: return new(buffer) functions::reduce3::ops::CosineSimilarity<T>();
            case 3: return new(buffer) functions::reduce3::ops::Dot<T>();
            default: return nullptr;
        }
    }
#else
    Reduce3<T> * getOp(int op) {
        switch (op) {
            case 0: return new functions::reduce3::ops::ManhattanDistance<T>();
            case 1: return new functions::reduce3::ops::EuclideanDistance<T>();
            case 2: return new functions::reduce3::ops::CosineSimilarity<T>();
            case 3: return new functions::reduce3::ops::Dot<T>();
            default: return nullptr;
        }
    }
#endif
};
}
}
#ifdef __CUDACC__
// Device helper: instantiate the requested reduce3 op in shared memory
// (thread 0 only), then run its no-element-wise-stride transform with
// every thread of the block participating.
template <typename T>
__inline__ __device__ void reduce3NoElementWiseStrideGeneric(
int opNum,
T *dx,
int *xShapeInfo,
T *dy,
int *yShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
// Shared so the whole block sees the single op/factory instance.
__shared__ functions::reduce3::Reduce3<T> * op;
__shared__ functions::reduce3::Reduce3OpFactory<T> *reduce3OpFactory;
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
// Thread 0 carves the manager, factory and op out of the block's
// dynamic shared memory.
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::reduce3::Reduce3OpFactory<T>), sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
reduce3OpFactory = new(manager->getFactorySpace()) functions::reduce3::Reduce3OpFactory<T>();
op = reduce3OpFactory->getOp(opNum, manager->getFunctionSpace());
}
// All threads must see the constructed op before using it.
__syncthreads();
op->transformNoElementWiseStride(
dx,
xShapeInfo,
dy,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
postProcessOrNot,
allocationPointer,
manager, tadOnlyShapeInfo);
}
/**
 * Double-precision kernel entry point: forwards every argument
 * unchanged to reduce3NoElementWiseStrideGeneric<double>.
 */
__global__ void reduce3NoElementWiseStrideDouble(
        int opNum,
        double *dx, int *xShapeInfo,
        double *dy, int *yShapeInfo,
        double *extraParams,
        double *result, int *resultShapeInfo,
        int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
    reduce3NoElementWiseStrideGeneric<double>(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, postProcessOrNot,
            allocationPointer, tadOnlyShapeInfo);
}
/**
 * Single-precision kernel entry point: forwards every argument
 * unchanged to reduce3NoElementWiseStrideGeneric<float>.
 */
__global__ void reduce3NoElementWiseStrideFloat(
        int opNum,
        float *dx, int *xShapeInfo,
        float *dy, int *yShapeInfo,
        float *extraParams,
        float *result, int *resultShapeInfo,
        int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
    reduce3NoElementWiseStrideGeneric<float>(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, postProcessOrNot,
            allocationPointer, tadOnlyShapeInfo);
}
/**
* The driver api
* @param opNum the number
* @param n the length of the reduce
* @param dx the input data
* @param xShapeInfo the shape information
* @param dy the pair wise reduce
* @param yShapeInfo the shape information for y
* @param extraParams the extra parameters in the operation
* @param result where to store the result
* @param resultShapeInfo the shape information
* @param gpuInformation the gpu information
* @param dimension the dimension to reduce along
* @param dimensionLength the dimension length
* @param postProcessOrNot whether to post process
*/
// Device helper: instantiate the requested reduce3 op in shared memory
// (thread 0 only), then run its dimension-wise transform with every
// thread of the block participating.
template <typename T>
__device__ void reduce3Generic(
int opNum,
T *dx,
int *xShapeInfo,
T *dy,
int *yShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
// Shared so the whole block sees the single op/factory instance.
__shared__ functions::reduce3::Reduce3<T> * op;
__shared__ functions::reduce3::Reduce3OpFactory<T> *reduce3OpFactory;
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
// Thread 0 carves the manager, factory and op out of the block's
// dynamic shared memory.
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::reduce3::Reduce3OpFactory<T>), sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
reduce3OpFactory = new(manager->getFactorySpace()) functions::reduce3::Reduce3OpFactory<T>();
op = reduce3OpFactory->getOp(opNum, manager->getFunctionSpace());
}
// All threads must see the constructed op before using it.
__syncthreads();
op->transform(
dx,
xShapeInfo,
dy,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo);
}
// Device helper: instantiate the requested reduce3 op in shared memory
// (thread 0 only), then reduce the whole pair of arrays down to a
// single scalar via execScalarCuda.
template <typename T>
__device__ void reduce3ScalarGeneric(
int opNum,
T *dx,
int *xShapeInfo,
T *dy,
int *yShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *allocationPointer, int *tadOnlyShapeInfo) {
// Shared so the whole block sees the single op/factory instance.
__shared__ functions::reduce3::Reduce3<T> * op;
__shared__ functions::reduce3::Reduce3OpFactory<T> *reduce3OpFactory;
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
// Thread 0 carves the manager, factory and op out of the block's
// dynamic shared memory.
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::reduce3::Reduce3OpFactory<T>), sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo));
reduce3OpFactory = new(manager->getFactorySpace()) functions::reduce3::Reduce3OpFactory<T>();
op = reduce3OpFactory->getOp(opNum, manager->getFunctionSpace());
}
// All threads must see the constructed op before using it.
__syncthreads();
op->execScalarCuda(
dx,
xShapeInfo,
dy,
yShapeInfo,
extraParams,
result,
resultShapeInfo,
allocationPointer, manager, tadOnlyShapeInfo);
}
/**
* The driver api
* @param opNum the number
* @param n the length of the reduce
* @param dx the input data
* @param xShapeInfo the shape information
* @param dy the pair wise reduce
* @param yShapeInfo the shape information for y
* @param extraParams the extra parameters in the operation
* @param result where to store the result
* @param resultShapeInfo the shape information
* @param dimension the dimension to reduce along
* @param dimensionLength the dimension length
* @param postProcessOrNot whether to post process
*/
/**
 * Double-precision kernel entry point for the dimension-wise reduce3:
 * forwards every argument unchanged to reduce3Generic<double>.
 */
extern "C"
__global__ void reduce3Double(
        int opNum,
        double *dx, int *xShapeInfo,
        double *dy, int *yShapeInfo,
        double *extraParams,
        double *result, int *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
    reduce3Generic<double>(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, dimension, dimensionLength,
            postProcessOrNot, allocationPointer, tadOnlyShapeInfo);
}
/**
* The driver api
* @param opNum the number
* @param n the length of the reduce
* @param dx the input data
* @param xShapeInfo the shape information
* @param dy the pair wise reduce
* @param yShapeInfo the shape information for y
* @param extraParams the extra parameters in the operation
* @param result where to store the result
* @param resultShapeInfo the shape information
* @param gpuInformation the gpu information
* @param dimension the dimension to reduce along
* @param dimensionLength the dimension length
* @param postProcessOrNot whether to post process
*/
/**
 * Single-precision kernel entry point for the dimension-wise reduce3:
 * forwards every argument unchanged to reduce3Generic<float>.
 */
extern "C"
__global__ void reduce3Float(
        int opNum,
        float *dx, int *xShapeInfo,
        float *dy, int *yShapeInfo,
        float *extraParams,
        float *result, int *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
    reduce3Generic<float>(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, dimension, dimensionLength,
            postProcessOrNot, allocationPointer, tadOnlyShapeInfo);
}
/**
 * Single-precision kernel entry point for the scalar reduce3.
 * The dimension/dimensionLength/postProcessOrNot parameters are kept
 * for launch-signature compatibility but are not used by the scalar
 * path; the rest is forwarded to reduce3ScalarGeneric<float>.
 */
extern "C"
__global__ void reduce3ScalarFloat(
        int opNum,
        float *dx, int *xShapeInfo,
        float *dy, int *yShapeInfo,
        float *extraParams,
        float *result, int *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
    reduce3ScalarGeneric<float>(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, allocationPointer, tadOnlyShapeInfo);
}
/**
 * Double-precision kernel entry point for the scalar reduce3.
 * The dimension/dimensionLength/postProcessOrNot parameters are kept
 * for launch-signature compatibility but are not used by the scalar
 * path; the rest is forwarded to reduce3ScalarGeneric<double>.
 */
extern "C"
__global__ void reduce3ScalarDouble(
        int opNum,
        double *dx, int *xShapeInfo,
        double *dy, int *yShapeInfo,
        double *extraParams,
        double *result, int *resultShapeInfo,
        int *dimension, int dimensionLength,
        int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) {
    reduce3ScalarGeneric<double>(
            opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams,
            result, resultShapeInfo, allocationPointer, tadOnlyShapeInfo);
}
#endif
#endif /* REDUCE3_H_ */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until it represents,
% at most, the number of colors desired in the output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "ios_error.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/*
  Double-precision accumulator for one RGBA pixel.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
/*
  One node of the color-description tree (see the header comment above):
  each node represents a cube in RGB(A) space.  child[16] allows up to
  sixteen children per level: 3 color bits plus an optional alpha bit
  (see ColorToNodeId()).
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
/* number of pixels classified exactly at this node (n2 in the header). */
MagickSizeType
number_unique;
/* component sums (Sr, Sg, Sb, Sa) of the pixels classified here. */
DoublePixelPacket
total_color;
/* accumulated quantization error E used to select nodes for pruning. */
double
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
/*
  Singly-linked list of node arenas; nodes are allocated NodesInAList
  at a time and the arenas are chained for later release.
*/
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
/*
  Working state for the quantization "cube": the color-description tree
  plus bookkeeping shared by classification, reduction, and assignment.
*/
typedef struct _CubeInfo
{
/* root of the color-description tree. */
NodeInfo
*root;
size_t
colors,
maximum_colors;
/* colormap slot reserved for fully transparent pixels (-1 if none). */
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
/* color currently being matched during closest-color searches. */
DoublePixelPacket
target;
/* distance and the two pruning thresholds (Ep and the next minimum E). */
double
distance,
pruning_threshold,
next_threshold;
/* node-arena bookkeeping. */
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
/* memoizes color -> colormap-index lookups. */
ssize_t
*cache;
/* recent diffusion errors and their weights -- used by dithering
   (see DitherImage); ErrorQueueLength entries. */
DoublePixelPacket
error[ErrorQueueLength];
double
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
/* when true, color channels are premultiplied by alpha (see
   AssociateAlphaPixel). */
MagickBooleanType
associate_alpha;
/* current position / progress state while scanning the image. */
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  /*
    Allocate and default-initialize the quantize structure; allocation
    failure is fatal (AcquireCriticalMemory).
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Seed the dither method from the image info; an explicit "dither"
    image option overrides the boolean dither flag.
  */
  quantize_info->dither_method=image_info->dither != MagickFalse ?
    RiemersmaDitherMethod : NoDitherMethod;
  {
    const char
      *option;

    option=GetImageOption(image_info,"dither");
    if (option != (const char *) NULL)
      quantize_info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,option);
  }
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    Copy a quantum pixel into double precision.  When alpha association
    is enabled and the pixel is not fully opaque, the color channels are
    premultiplied by the normalized alpha; the alpha channel itself is
    always copied unscaled.
  */
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      return;
    }
  {
    double
      scale;

    scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
    alpha_pixel->red=scale*GetPixelRed(image,pixel);
    alpha_pixel->green=scale*GetPixelGreen(image,pixel);
    alpha_pixel->blue=scale*GetPixelBlue(image,pixel);
  }
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    PixelInfo flavor of AssociateAlphaPixel(): copy the pixel into
    double precision, premultiplying the color channels by the
    normalized alpha when association is enabled and the pixel is not
    fully opaque; alpha itself is always copied unscaled.
  */
  alpha_pixel->alpha=(double) pixel->alpha;
  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      return;
    }
  {
    double
      scale;

    scale=(double) (QuantumScale*pixel->alpha);
    alpha_pixel->red=scale*pixel->red;
    alpha_pixel->green=scale*pixel->green;
    alpha_pixel->blue=scale*pixel->blue;
  }
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  /*
    Build the octree/hextree child index from bit `index' of each 8-bit
    channel: red contributes bit 0, green bit 1, blue bit 2, and alpha
    (only when associated) bit 3.
  */
  id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01) << 1);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) &
      0x01) << 3);
  return(id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.  Quantization may run in an alternate
    colorspace; remember the original so it can be restored on exit.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image: either dither, or map each pixel to the
    closest colormap entry via the color cube tree.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /*
          Each row works on a private copy of the cube so the mutable
          search state (target, distance, color_number) is per-thread.
        */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.  A run
            of `count' identical pixels is classified only once.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children; seed the
            search with a distance larger than any possible match.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /*
            Write the chosen colormap index (and color, unless the caller
            wants to measure error against the original pixels) to every
            pixel in the run.
          */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image: snap the two colormap entries to pure black and
        white, ordered by luma.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /*
    Restore the caller's colorspace if quantization ran in another one.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color.  It updates the following data for each such
% node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  /*
    Associate alpha only when the image blends its alpha channel; a
    two-color grayscale quantization always ignores alpha.
  */
  associate_alpha=MagickFalse;
  if (image->alpha_trait == BlendPixelTrait)
    associate_alpha=MagickTrue;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
    This runs in two phases: the first row loop classifies at full
    MaxTreeDepth until the color budget is exceeded (then prunes and
    breaks); the second loop continues on the remaining rows at the
    (possibly pruned) cube depth.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  A run of
        `count' identical pixels is classified once.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /*
          Each level halves the cell; `mid' tracks the center of the cell
          selected by this pixel's channel bits.
        */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Color budget exhausted: prune and fall through to the reduced-
          depth classification loop below.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second phase: classify the remaining rows, descending only to
    cube_info->depth instead of MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /*
    NOTE(review): on exit the image is transformed to sRGBColorspace, not
    back to its original colorspace -- confirm this asymmetry with the
    entry transform above is intentional.
  */
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a default-initialized quantize structure; when no source is
    supplied the defaults are the result, otherwise copy the source's
    tunable fields over the defaults.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8 per node, or 16 when alpha is associated).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        pixel;

      register double
        alpha,
        beta,
        distance;

      register DoublePixelPacket
        *magick_restrict q;

      register PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest": compare this node's colormap
        entry against the target in cube_info, weighting the color
        channels by normalized alpha when alpha is associated.  The
        squared distance is accumulated channel by channel so the test can
        bail out as soon as it exceeds the best distance found so far.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      /*
                        New best match: record its distance and colormap
                        index in the cube's search state.
                      */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette images qualify; anything else is left untouched.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Requantizing to the image's current color count at maximum tree depth
    folds duplicate colormap entries and drops unused ones.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8 per node, or 16 when alpha is associated).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color holds normalized channel sums, so divide by the number
        of unique pixels (alpha is its safe reciprocal) and rescale to the
        quantum range.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Colors were accumulated premultiplied by alpha (see
                AssociateAlphaPixel); divide the mean alpha back out
                (gamma) to recover the straight color.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Remember the most-populated non-opaque entry as the
                image's transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *node;

  /*
    Release color cube tree storage: free each pooled block of nodes in
    the queue, then the auxiliary buffers, and finally the cube itself.
  */
  node=cube_info->node_queue;
  do
  {
    Nodes
      *next;

    next=node->next;
    node->nodes=(NodeInfo *) RelinquishMagickMemory(node->nodes);
    node=(Nodes *) RelinquishMagickMemory(node);
    node=next;
  } while (node != (Nodes *) NULL);
  cube_info->node_queue=(Nodes *) NULL;
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Validate the argument before doing anything else; asserting first and
    logging second matches the convention used elsewhere in this file
    (e.g. CompressImageColormap).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Invalidate the signature so stale pointers are caught by the asserts
    above on a double-destroy.
  */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Release the per-thread error buffers allocated by
    AcquirePixelThreadSet(), then the pointer array itself.  Always
    returns NULL so callers can write `pixels=DestroyPixelThreadSet(...)'.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  /*
    The thread limit is loop-invariant; query it once instead of on every
    iteration (matches AcquirePixelThreadSet).
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixel_set;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one scratch buffer per worker thread.  Each buffer holds two
    rows of `count' error packets (the current and the previous row of the
    dither).  On any allocation failure everything acquired so far is
    released and NULL is returned.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_set=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_set));
  if (pixel_set == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixel_set,0,number_threads*sizeof(*pixel_set));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_set[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixel_set));
    if (pixel_set[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixel_set));
  }
  return(pixel_set);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  /*
    Map a pixel color to its slot in the dither cache: each 8-bit channel
    is reduced to its top (8-CacheShift) bits and the reduced channels are
    packed into adjacent bit fields (red lowest, then green, blue, and --
    when alpha is associated -- alpha).
  */
  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  /*
    The "dither:diffusion-amount" artifact scales how much of the
    quantization error is propagated (1.0 = full diffusion).
  */
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /*
      The per-thread buffer holds two rows of error terms; even and odd
      rows alternate between the two halves.  `v' is the serpentine scan
      step: left-to-right on even rows, right-to-left on odd rows.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      /*
        `u' is the actual column for this serpentine step.  Fold in the
        diffused error from already-visited neighbors with the classic
        Floyd-Steinberg weights: 7/16 from the previous pixel of this row
        and 1/16, 5/16, 3/16 from the row above.
      */
      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /*
        The cube cache memoizes color -> colormap-index lookups; a
        negative entry means this color has not been resolved yet.
      */
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      /*
        NOTE(review): the sync and progress callbacks run once per pixel,
        inside the column loop -- confirm this is intentional; per-row
        syncing (as in AssignImageColors) looks like the intent.
      */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    NOTE(review): `status' is tracked above but the return value is always
    MagickTrue -- confirm whether failures should propagate to the caller.
  */
  return(MagickTrue);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
/*
  Riemersma() recursively traces a Hilbert curve of order `level` over the
  image, oriented by `direction`, and issues one RiemersmaDither() call per
  unit move so quantization error diffuses along the curve.  The gravity
  enumerants (West/East/North/South) are borrowed as compass directions for
  the unit moves.  Per-step dither failures are discarded via the (void)
  casts.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    /*
      Base case: a first-order curve segment is three unit moves whose
      pattern depends on the segment's orientation.
    */
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: each quadrant holds a lower-order sub-curve; the four
      sub-curves are stitched together with three unit moves.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
/*
  RiemersmaDither() dithers the single pixel at the cube's cursor
  (cube_info->x, cube_info->y) and then advances the cursor one unit step in
  `direction`.  The weighted error history in cube_info->error is added to
  the pixel before it is mapped to the closest colormap entry; the new
  quantization error is appended as the last entry of the history queue.
  Returns MagickFalse if pixel access, sync, or the progress callback fails.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  /*
    Only dither when the cursor lies inside the image; off-image steps of
    the Hilbert curve still advance the cursor below.
  */
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /*
        Add the weighted error history to the current pixel.
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      /*
        Clamp back into quantum range before the cache/tree lookup.
      */
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue (shift the
        queue left by one, then append the fresh error).
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Advance the Hilbert-curve cursor one unit step.
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  DitherImage() distributes quantization error across the image.  For any
  method other than Riemersma it delegates to FloydSteinbergDither();
  otherwise it walks a Hilbert curve deep enough to cover the larger image
  dimension and dithers along it.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    extent;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Curve order: one more than the number of halvings of the larger image
    dimension, bumped once more if 2^depth still falls short.
  */
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (extent != 0)
  {
    extent>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a node in the
% color cube tree.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree where the node
% resides.
%
*/
/*
  GetNodeInfo() returns a zeroed node from the current arena, allocating a
  fresh arena of NodesInAList nodes when the current one is exhausted.
  Returns NULL on allocation failure.
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Release the arena header too (previously leaked on this path:
            `nodes` was never linked into the queue and never freed).
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageQuantizeError() measures the difference between a PseudoClass
  image and its colormap: for every pixel the absolute and squared
  per-channel differences against the indexed colormap entry are summed,
  then normalized into image->error.  DirectClass images report zero error.
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        difference[3];

      register ssize_t
        i;

      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /*
        Alpha-weighted signed differences, folded in red/green/blue order
        so the accumulation matches a channel-by-channel walk.
        NOTE(review): index is trusted to lie within image->colors --
        confirm upstream callers guarantee valid colormap indexes.
      */
      difference[0]=(double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red);
      difference[1]=(double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green);
      difference[2]=(double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue);
      for (i=0; i < 3; i++)
      {
        distance=fabs(difference[i]);
        mean_error_per_pixel+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
/*
  GetQuantizeInfo() initializes the QuantizeInfo structure to its defaults:
  256 colors, Riemersma dithering, undefined colorspace, no error
  measurement.  The precondition is now asserted before any other work,
  matching the other entry points in this file.
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread accumulation bucket for one k-means cluster: channel sums
  (values are scaled by QuantumScale at accumulation time), the number of
  pixels assigned to the cluster, and the cluster's summed distortion.
*/
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;
/*
  DestroyKmeansThreadSet() releases the per-thread cluster accumulators
  allocated by AcquireKmeansThreadSet() and then the pointer array itself.
  Always returns NULL so callers can write p=DestroyKmeansThreadSet(p).
  NOTE(review): assumes the ThreadResource limit is unchanged since
  acquisition -- confirm, as a shrunken limit would leak tail entries.
*/
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(kmeans_info != (KmeansInfo **) NULL);
  /* Hoisted out of the loop condition: the limit is loop-invariant. */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (kmeans_info[i] != (KmeansInfo *) NULL)
      kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]);
  kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info);
  return(kmeans_info);
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
/*
  KmeansMetric() returns the squared, alpha-weighted color distance between
  pixel p and cluster center q.  All channel differences are normalized to
  [0,1] with QuantumScale before squaring so every channel carries equal
  weight; `gamma` down-weights color terms by transparency/black coverage.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  register double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /*
        Normalize the alpha difference with QuantumScale so it carries the
        same weight as the color channels (previously it was accumulated in
        raw quantum units, letting alpha dominate the metric).
      */
      pixel=QuantumScale*(GetPixelAlpha(image,p)-(q->alpha_trait !=
        UndefinedPixelTrait ? q->alpha : OpaqueAlpha));
      metric+=pixel*pixel;
      if (image->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        gamma*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /*
        Hue is circular: wrap differences greater than a half turn.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
/*
  KmeansImage() clusters the image's pixels around number_colors centroids,
  iterating at most max_iterations times or until the total distortion
  changes by no more than tolerance between passes.  Seeds come from the
  "kmeans:seed-colors" artifact (a ';'-separated color list, padded with
  random colors) or, absent that, from an octree color quantization.

  Fixes relative to the previous revision: the alpha channel is now
  accumulated, reduced, and re-centered whenever the image defines alpha
  (`alpha_trait != UndefinedPixelTrait`); the old `!= BlendPixelTrait`
  guards skipped alpha exactly when the image had blended alpha, which
  contradicted KmeansMetric().  The verbose trace writes to stderr.
*/
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  register ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        depth,
        node_count;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /*
        Tree depth ~ log4(number_colors); node_count replaces a local that
        shadowed the outer `colors` string.
      */
      node_count=number_colors;
      for (depth=1; node_count != 0; depth++)
        node_count>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      register const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        register const char
          *q;

        /*
          Scan to the next ';' separator (or end of string).
        */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed any remaining clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            if (image->alpha_trait != UndefinedPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    register ssize_t
      i;

    ssize_t
      y;

    /*
      Reset the per-thread accumulators for this pass.
    */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        register ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != UndefinedPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != UndefinedPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  PosterizeImage() snaps every updatable channel to `levels` evenly spaced
  values (via the PosterizePixel macro), first for the colormap of a
  PseudoClass image and then for every pixel, and finally quantizes the
  result down to at most levels^3 colors with the requested dither method.
*/
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    NOTE(review): the pragma below lists progress/status as shared before
    either is initialized; the colormap loop reads neither, but initializing
    them earlier would be tidier -- confirm intent.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Posterize only the channels flagged for update; black additionally
        requires CMYK and alpha a blended alpha trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress is incremented atomically but read
          non-atomically in the SetImageProgress call below -- confirm this
          matches the file's other progress loops.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors with the requested dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneSubtree method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Recursively prune any children first so their statistics percolate
    upward before this node is merged into its own parent.  The branching
    factor is 8 for RGB, 16 when an alpha channel is associated.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.  Guard against a node without a
    parent (e.g. the tree root) so we never dereference NULL; such a node
    cannot be detached and is simply left in place.
  */
  parent=node_info->parent;
  if (parent == (NodeInfo *) NULL)
    return;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    child;

  size_t
    children;

  /*
    Walk every populated child branch first (depth-first), then merge any
    node sitting at the bottom level of the tree into its parent.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) children; child++)
  {
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[child]);
  }
  /*
    Only nodes residing at the maximum cube depth are pruned here.
  */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    child;

  size_t
    children;

  /*
    Depth-first traversal: prune descendants before considering this node,
    so statistics are merged from the leaves upward.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) children; child++)
  {
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[child]);
  }
  /*
    Any node deeper than the configured cube depth is folded into its
    parent.
  */
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested color count to [1,MaxColormapSize]; a request of
    0 means "use the maximum colormap size".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    For images without a blended alpha channel that turn out to be gray,
    take the direct grayscale-pseudoclass path (no color cube needed).
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Dithering masks quantization error, so a shallower tree suffices. */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      /* An associated alpha channel doubles the branching factor; trim. */
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      /* Gray images use the full tree depth for maximum tonal precision. */
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Classify, reduce (only if over budget), then assign colormap indexes. */
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested color count to [1,MaxColormapSize]; 0 means "use
    the maximum colormap size".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Dithering masks quantization error, so a shallower tree suffices. */
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.  One cube is shared by the whole sequence so
    the resulting colormap is global across all frames.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Pass 1: classify the colors of every frame into the shared cube.  The
    per-image progress monitor is temporarily disabled so only the
    sequence-level progress (reported below) is visible.
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      /*
        Pass 2: map each frame's pixels onto the shared, reduced colormap.
      */
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  register ssize_t
    i;

  size_t
    n,
    number_children;

  /* Bounds guard: never write past the cube_info->nodes-sized array. */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  /*
    Pre-order traversal: record this node's quantization error at the
    current offset, then pack each child subtree immediately after it.
    The return value is the number of slots this subtree consumed.
  */
  quantize_error[offset]=node_info->quantize_error;
  n=1;
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children ; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
        quantize_error);
  return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (depth-first, so leaves are pruned before their
    ancestors are considered).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  /*
    Prune this node if its quantization error is within the current
    threshold; otherwise count it as a surviving color and track the
    smallest surviving error for use as the next iteration's threshold.
  */
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  qsort() comparator ordering quantization errors ascending.

  The previous implementation tested (*p > *q) before its epsilon-equality
  test, so for two values within MagickEpsilon of each other it returned 1
  in one argument order but 0 in the other.  Such an inconsistent (and
  non-transitive) comparator violates qsort()'s contract and is undefined
  behavior.  A plain total order on the doubles is both consistent and
  simpler; the epsilon tolerance served no purpose for sorting.
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  const double
    *p,
    *q;

  p=(const double *) error_p;
  q=(const double *) error_q;
  if (*p > *q)
    return(1);
  if (*p < *q)
    return(-1);
  return(0);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten all
        node errors into a sorted array and jump the starting threshold to
        roughly 110% of the color budget, so the loop below converges in
        far fewer passes.  On allocation failure we silently fall back to
        the slow iterative reduction (best-effort optimization only).
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
  }
  /*
    Repeatedly prune the tree: each pass removes all nodes whose error is
    at or below the threshold, recounts surviving colors, and raises the
    threshold to the minimum surviving error found by Reduce().
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    /* Seed next_threshold above any node error so Reduce() can minimize. */
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.  Validate quantize_info as well, matching the
    parameter checks performed by the sibling entry points QuantizeImage()
    and QuantizeImages(); previously this function dereferenced
    quantize_info without any validation.
  */
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Build the cube from the reference image's colors ... */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        ... then map every pixel of the target image onto that palette.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Map every frame of the sequence onto the reference palette,
        stopping at the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering PixelInfo colormap entries by grayscale
  intensity.

  The previous implementation cast the double intensity *difference* to
  int, which truncated any fractional difference to zero: two distinct
  intensities less than 1.0 apart compared as equal, and the comparator
  was non-transitive (a~b, b~c, yet a>c), which is undefined behavior for
  qsort().  Comparing the two intensities directly yields a consistent
  total order and makes the INT_MIN/INT_MAX clamping unnecessary.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity_1,
    intensity_2;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity_1=GetPixelInfoIntensity((const Image *) NULL,color_1);
  intensity_2=GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity_1 > intensity_2)
    return(1);
  if (intensity_1 < intensity_2)
    return(-1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /*
    colormap_index maps a ScaleQuantumToMap()'d intensity to a colormap
    slot; size it to cover both MaxMap and any existing colormap.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass source: build a colormap on the fly.  memset with -1
        sets every byte to 0xFF, which reads back as -1 in each ssize_t
        slot ("unassigned") on two's-complement targets.
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          /* Image is gray at this point, so red carries the intensity. */
          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
              /*
                Double-checked entry creation: the unsynchronized outer
                test is a fast path; the critical section re-tests before
                appending a new colormap entry.  NOTE(review): the outer
                read is unsynchronized and relies on benign-race behavior
                under OpenMP -- confirm this is acceptable here.
              */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity and collapse duplicates.  The alpha
    field temporarily stores each entry's original index so the old->new
    index mapping survives the qsort.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /* Copy unique entries into the new colormap; record old->new indexes. */
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Rewrite every pixel's index through the old->new mapping.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
% ExceptionInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    colormap_size;

  /*
    Allocate a colormap sized for the cube, populate it from the color
    cube tree, then shrink the allocation down to the entries actually
    defined.
  */
  colormap_size=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,colormap_size,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors == colormap_size)
    return(MagickTrue);
  image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
    image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  return(MagickTrue);
}
|
GB_unaryop__ainv_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_bool
// op(A') function: GB_tran__ainv_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the AINV (additive inverse) unary operator to
// every entry.  Per the GB_CAST_OP macros above, each bool aij is first cast
// to uint32_t and then negated; unsigned negation wraps, so true (1) becomes
// UINT32_MAX.  Returns GrB_NO_VALUE when this kernel is compiled out via
// GB_DISABLE.  (Auto-generated file: code intentionally left untouched.)
GrB_Info GB_unop__ainv_uint32_bool
(
    uint32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each of the anz entries is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast bool->uint32_t, and apply the
// AINV operator, all in one pass.  The actual loop lives in the shared
// template GB_unaryop_transpose.c, specialized here via the GB_* macros
// defined above (GB_PHASE_2_OF_2 selects the numeric phase of the
// transpose).  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
// (Auto-generated file: code intentionally left untouched.)
GrB_Info GB_tran__ainv_uint32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
smooth.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "../admm.h"
/******************************************************************************
* TYPES
*****************************************************************************/
/* Workspace for the column-smoothness proximity operator. */
typedef struct
{
  val_t lambda;                 /* smoothness regularization strength */
  matrix_t * transpose_buffer;  /* col-major scratch for the transposed RHS */
  matrix_t * banded;            /* LAPACK band-storage matrix (holds LU factors
                                   after p_form_banded calls DGBTRF) */
  int * pivot;                  /* pivot indices produced by DGBTRF */
} smooth_ws;
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Initialize the banded matrix B^T * B, where B is a tri-diagonal matrix
* with 2's on the diagonal and 1's on the sub/super-diagonals. The
* result is a banded matrix with bandwidth 2, stored col-major:
*
* 0 0 0 0 0
* 0 0 0 0 0
* 5 -4 1 0 0
* rho * (lambda * -4 6 -4 1 0 + diag(rho))
* 1 -4 6 -4 1
* 0 1 -4 6 -4
* 0 0 1 -4 5
*
* The additional 2 rows at the top are for fill-in during LU
* factorization.
*
* This routine then computes the LU factorization of this matrix using
* DGBTRF.
*
* @param[out] smooth The smoothness workspace to initialize.
* @param I The dimension of the mode with smoothness.
* @param rho The current penalty term of the ADMM iteration.
*/
/*
  Fill smooth->banded (LAPACK band storage, col-major, one column of the
  buffer per matrix column) with rho*I + lambda*(B^T B) for the tri-diagonal
  smoothing operator B, then LU-factorize it in place with DGBTRF.  The
  "2+" offsets below skip the two fill-in rows that DGBTRF needs at the top
  of each stored column (KL extra rows for a bandwidth-2 matrix).
*/
static void p_form_banded(
    smooth_ws * const smooth,
    idx_t const I,
    val_t const rho)
{
  val_t * vals = smooth->banded->vals;
  val_t const lambda = smooth->lambda;

  /* first column is special: diagonal entry is 5*lambda, not 6*lambda */
  vals[2+2+0] = (5. * lambda) + rho;
  vals[2+2+1] = -4. * lambda;
  vals[2+2+2] = 1. * lambda;

  /* all columns except the last */
  idx_t const nrows = smooth->banded->I; /* account for extra rows */
  for(idx_t i=1; i < I-1; ++i) {
    /* advance to the next stored column */
    vals += nrows;

    /* offset into current column */
    if(i > 1) {
      vals[2+0] = 1. * lambda;   /* second super-diagonal */
    }
    vals[2+1] = -4. * lambda;    /* first super-diagonal */
    vals[2+2] = (6. * lambda) + rho; /* add rho to diagonal */
    vals[2+3] = -4. * lambda;    /* first sub-diagonal */
    if(i < I-2) {
      vals[2+4] = 1. * lambda;   /* second sub-diagonal */
    }
  }

  /* last column is special, too */
  vals += nrows;
  vals[2+0] = 1. * lambda;
  vals[2+1] = -4. * lambda;
  vals[2+2] = (5. * lambda) + rho;

  /* compute the LU factorization */
  int nbands = 2;
  int M = (int) I;
  int N = (int) I;
  int KL = (int) nbands;
  int KU = (int) nbands;
  /* DGBTRF requires LDAB >= 2*KL+KU+1 = 7 for KL=KU=2 */
  int lda = (int) (2 * nbands) + nbands + 1;
  int info = 0;
  LAPACK_DGBTRF(&M, &N, &KL, &KU, smooth->banded->vals, &lda, smooth->pivot, &info);
  if(info) {
    /* info>0: singular factor; info<0: bad argument.  Best-effort report. */
    fprintf(stderr, "SPLATT: DGBTRF returned %d\n", info);
  }
}
void splatt_smooth_init(
    splatt_val_t * vals,
    splatt_idx_t const nrows,
    splatt_idx_t const ncols,
    void * data)
{
  smooth_ws * const ws = (smooth_ws *) data;

  /* LAPACK banded storage: the diagonal, KL+KU off-diagonal bands, plus
   * KL extra rows of fill-in used during the LU factorization. */
  int const nbands = 2;
  ws->banded = mat_alloc((3 * nbands) + 1, nrows);
  ws->banded->rowmajor = 0;

  ws->pivot = splatt_malloc(nrows * sizeof(*ws->pivot));

  /* scratch used to transpose the row-major factor for the banded solve */
  ws->transpose_buffer = mat_alloc(nrows, ncols);
}
/**
* @brief Apply the proximity operator to enforce column smoothness. This solves
* a banded linear system and performs a few matrix transposes.
*
* @param[out] primal The row-major matrix to update.
* @param nrows The number of rows in primal.
* @param ncols The number of columns in primal.
* @param offset Not used.
* @param data Workspace.
* @param rho Multiplier on the regularization.
* @param should_parallelize If true, parallelize.
*/
void splatt_smooth_prox(
    val_t * primal,
    idx_t const nrows,
    idx_t const ncols,
    idx_t const offset,
    void * data,
    val_t const rho,
    bool const should_parallelize)
{
  assert(offset == 0);
  smooth_ws * const ws = (smooth_ws *) data;
  val_t * const restrict scratch = ws->transpose_buffer->vals;

  /* (re)build B^T*B + rho*I for the current penalty and LU-factorize it */
  p_form_banded(ws, nrows, rho);

  /* transpose primal (row-major) into scratch (col-major) for LAPACK */
  #pragma omp parallel if(should_parallelize)
  {
    for(idx_t col=0; col < ncols; ++col) {
      #pragma omp for schedule(static) nowait
      for(idx_t row=0; row < nrows; ++row) {
        scratch[row + (col * nrows)] = primal[col + (row * ncols)];
      }
    }
  } /* end omp parallel */

  /* back-solve the factorized banded system against all columns at once */
  char trans = 'N';
  int N = (int) nrows;
  int KL = 2;
  int KU = 2;
  int nrhs = (int) ncols;
  int lda = (int) (2 * KL) + KU + 1;
  int ldb = N;
  int info;
  LAPACK_DGBTRS(&trans, &N, &KL, &KU, &nrhs, ws->banded->vals, &lda,
      ws->pivot, scratch, &ldb, &info);
  if(info) {
    fprintf(stderr, "SPLATT: DGBTRS returned %d\n", info);
  }

  /* transpose the solution back into primal, scaling by rho */
  #pragma omp parallel for schedule(static) if(should_parallelize)
  for(idx_t row=0; row < nrows; ++row) {
    for(idx_t col=0; col < ncols; ++col) {
      primal[col + (row * ncols)] = scratch[row + (col * nrows)] * rho;
    }
  }
}
/**
* @brief Free the smoothness workspace.
*
* @param data The data to free.
*/
void splatt_smooth_free(
void * data)
{
smooth_ws * ws = data;
mat_free(ws->banded);
mat_free(ws->transpose_buffer);
splatt_free(ws->pivot);
splatt_free(ws);
}
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
splatt_error_type splatt_register_smooth(
    splatt_cpd_opts * opts,
    splatt_val_t const multiplier,
    splatt_idx_t const * const modes_included,
    splatt_idx_t const num_modes)
{
  for(idx_t i=0; i < num_modes; ++i) {
    /* one constraint -- and one private workspace -- per requested mode */
    splatt_cpd_constraint * con = splatt_alloc_constraint(SPLATT_CON_ADMM);

    con->init_func = splatt_smooth_init;
    con->prox_func = splatt_smooth_prox;
    con->free_func = splatt_smooth_free;

    smooth_ws * ws = splatt_malloc(sizeof(*ws));
    ws->lambda = multiplier;
    con->data = ws;

    sprintf(con->description, "SMOOTH-COL (%0.1e)", multiplier);
    splatt_register_constraint(opts, modes_included[i], con);
  }

  return SPLATT_SUCCESS;
}
/* ==================== file: eltwise_layernorm.c ==================== */
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <immintrin.h>
/* include c-based dnn library */
#include "../deeplearning/common/dnn_common.h"
#if defined(_OPENMP)
# include <omp.h>
#endif
#define EPS 1e-9
LIBXSMM_INLINE
/* Fill an ld x n column-major matrix (full leading dimension, including any
 * padding rows) with uniform random values in (-1, 1). */
void sfill_matrix ( float *matrix, unsigned int ld, unsigned int m, unsigned int n )
{
  unsigned int row, col;
  if ( ld < m )
  {
    fprintf(stderr,"Error is sfill_matrix: ld=%u m=%u mismatched!\n",ld,m);
    exit(EXIT_FAILURE);
  }
  for ( col = 0; col < n; col++ )
  {
    for ( row = 0; row < ld; row++ )
    {
      matrix[ col*ld + row ] = (float)(1.0 - 2.0*libxsmm_rng_f64());
    }
  }
}
LIBXSMM_INLINE
void naive_layernorm(int m, int n, int ld_in, float *sinp, float *gamma, float *beta, float *sout_ref, float *mean_data_ref, float *rstd_data_ref)
{
int i, j;
#if defined(_OPENMP)
#pragma omp parallel for private(j)
#endif
for (j = 0; j < n; j++) {
float mean_val_ref = 0, rstd_val_ref = 0, scale_ref = 0, bias_ref = 0, gamma_val_ref = 0, beta_val_ref = 0;
mean_data_ref[j] = 0;
rstd_data_ref[j] = 0;
for (i = 0; i < m; i++) {
mean_data_ref[j] += sinp[j*ld_in + i];
rstd_data_ref[j] += sinp[j*ld_in + i] * sinp[j*ld_in + i];
}
mean_val_ref = mean_data_ref[j]/m;
rstd_val_ref = (rstd_data_ref[j]/m)-mean_val_ref*mean_val_ref;
rstd_val_ref = 1/((float)sqrt(rstd_val_ref));
mean_data_ref[j] = mean_val_ref;
rstd_data_ref[j] = rstd_val_ref;
scale_ref = rstd_val_ref;
bias_ref = -1.f * rstd_val_ref * mean_val_ref;
for (i = 0; i < m; i++) {
gamma_val_ref = gamma[i];
beta_val_ref = beta[i];
sout_ref[j*ld_in+i] += (sinp[j*ld_in+i] * scale_ref + bias_ref) * gamma_val_ref + beta_val_ref;
}
}
}
LIBXSMM_INLINE
/* Reference backward pass of layer normalization.  Given upstream gradients
 * dY and the forward-pass statistics (mean, rstd), produces dX plus the
 * accumulated parameter gradients dgamma/dbeta. */
void naive_layernorm_bwd(int m, int n, int ld_in, float *dY, float *X, float *mean, float *rstd, float *gamma, float *dX, float *dgamma, float *dbeta)
{
  float inv_std, shift, offs, sum_dy_x_g, sum_dy_g, inv_m = (float)(1.0 / m);
  int row, col;

  /* dgamma/dbeta accumulate across all columns: clear them first */
  for (row = 0; row < m; row++) {
    dgamma[row] = 0;
    dbeta[row] = 0;
  }

  for (col = 0; col < n; col++) {
    inv_std = rstd[col];
    shift = -1.f * inv_std * mean[col];
    sum_dy_x_g = 0;
    sum_dy_g = 0;
    for (row = 0; row < m; row++) {
      dgamma[row] += dY[col*ld_in+row] * (inv_std * X[col*ld_in+row] + shift);
      dbeta[row] += dY[col*ld_in+row];
      sum_dy_x_g += dY[col*ld_in+row] * X[col*ld_in+row] * gamma[row];
      sum_dy_g += dY[col*ld_in+row] * gamma[row];
    }
    /* per-column correction terms for dX */
    shift = (sum_dy_g * mean[col] - sum_dy_x_g) * inv_std * inv_std * inv_std * inv_m;
    offs = -1.f * shift * mean[col] - sum_dy_g * inv_std * inv_m;
    for (row = 0; row < m; row++) {
      dX[col*ld_in+row] = inv_std * dY[col*ld_in+row] * gamma[row] + shift * X[col*ld_in+row] + offs;
    }
  }
}
#if 0
LIBXSMM_INLINE
/* Single-block layernorm using pre-dispatched LIBXSMM JIT kernels.
 * NOTE(review): this function sits inside an '#if 0' region and is currently
 * compiled out; the blocked variant below is used instead. */
void optimized_layernorm(int m, int n, int ld_in, float *sinp, float *gamma, float *beta, float *sout, float *mean_data, float *rstd_data, libxsmm_meltwfunction_reduce reduce_kernel, libxsmm_meltwfunction_scale scalemean_kernel, libxsmm_meltwfunction_scale scaleout_kernel, float * bias_aux)
{
  int i;
  float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
  __m512 minus_ones = _mm512_set1_ps(-1.f);
#endif
  libxsmm_meltw_reduce_param reduce_params;
  libxsmm_meltw_scale_param scalemean_params;
  libxsmm_meltw_scale_param scaleout_params;
  /* per-column sum -> mean_data, per-column sum-of-squares -> rstd_data */
  reduce_params.in_ptr = sinp;
  reduce_params.out_ptr_0 = mean_data;
  reduce_params.out_ptr_1 = rstd_data;
  reduce_kernel(&reduce_params);
  /* scale both reductions by 1/m (in place) to get E[x] and E[x^2] */
  scalemean_params.in_ptr = mean_data;
  scalemean_params.out_ptr = mean_data;
  scalemean_params.scale_vals_ptr = &reverse_m;
  scalemean_kernel(&scalemean_params);
  scalemean_params.in_ptr = rstd_data;
  scalemean_params.out_ptr = rstd_data;
  scalemean_kernel(&scalemean_params);
  /* Calculate rstd and auxiliary bias vectors*/
#if defined(__AVX512F__)
  /* rstd = rsqrt(E[x^2] - mean^2) via approximate rsqrt14; bias = -mean*rstd */
  for (i = 0; i < n-15; i+= 16) {
    __m512 vrstd = _mm512_loadu_ps(rstd_data+i);
    __m512 vmean = _mm512_loadu_ps(mean_data+i);
    vrstd = _mm512_rsqrt14_ps(_mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
    _mm512_storeu_ps(rstd_data+i, vrstd);
    _mm512_storeu_ps(bias_aux+i, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
  }
  /* masked remainder (n not a multiple of 16) */
  if (i < n) {
    int rem = n - i;
    __mmask16 mask = (1 << rem) - 1;
    __m512 vrstd = _mm512_maskz_loadu_ps(mask, rstd_data+i);
    __m512 vmean = _mm512_maskz_loadu_ps(mask, mean_data+i);
    vrstd = _mm512_maskz_rsqrt14_ps(mask, _mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
    _mm512_mask_storeu_ps(rstd_data+i, mask, vrstd );
    _mm512_mask_storeu_ps(bias_aux+i, mask, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
  }
#else
  for (i = 0; i < n; i++) {
    rstd_data[i] = (float)(1.0 / sqrt(rstd_data[i] - mean_data[i] * mean_data[i]));
    bias_aux[i] = -1.f * mean_data[i] * rstd_data[i];
  }
#endif
  /* out = (x * rstd + bias) * gamma + beta, applied by the JIT scale kernel */
  scaleout_params.in_ptr = sinp;
  scaleout_params.out_ptr = sout;
  scaleout_params.scale_vals_ptr = rstd_data;
  scaleout_params.bias_vals_ptr = bias_aux;
  scaleout_params.scale_vals_ptr2 = gamma;
  scaleout_params.bias_vals_ptr2 = beta;
  scaleout_kernel(&scaleout_params);
}
#else
LIBXSMM_INLINE
/**
 * Blocked, threaded layernorm over an NCNC-blocked m x n input (bm x bn
 * blocks), normalizing over the m dimension.  Reductions and the final
 * scaling are JITed LIBXSMM element-wise kernels; the rstd/bias computation
 * in between is done with AVX-512 intrinsics (scalar fallback otherwise).
 *
 * @param m,n        full problem dimensions (assumed divisible by bm/bn)
 * @param bm,bn      block sizes
 * @param data_in    input, updated in place with the normalized output
 * @param gamma_data,beta_data  per-row scale/shift (length m)
 * @param[out] mean_data,rstd_data  per-column statistics (length n)
 */
void optimized_blocked_layernorm(int m, int n, int bm, int bn, float *data_in, float *gamma_data, float *beta_data, float *mean_data, float *rstd_data)
{
  int ld = bm, ld_vector = bn, _ld;
  libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  libxsmm_meltw_unary_type unary_type;
  libxsmm_meltwfunction_unary reduce_rows_kernel, reduce_cols_kernel;
  libxsmm_meltw_scal_flags jit_scale_flags = 0;
  libxsmm_meltwfunction_scale scale_kernel;
  libxsmm_meltw_scal_flags jit_scaleout_flags = 0;
  libxsmm_meltwfunction_scale scaleout_kernel;
#if defined(_OPENMP)
  int threads = omp_get_max_threads(); /* number of threads */
#else
  int threads = 1; /* number of threads */
#endif
  int nBlocks = n/bn;
  int mBlocks = m/bm;
  /* scratch layout: [2*n*mBlocks] partial sums/sums-of-squares, [n] aux bias */
  float *const scratch = (float*)libxsmm_aligned_scratch((2 * n * mBlocks + n) * sizeof(float), 0/*auto-alignment*/);
  float *sums_sums_sq_ptr = scratch;
  float *aux_bias_ptr = scratch + 2 * n * mBlocks;
  LIBXSMM_VLA_DECL(3, float, sums_sums_sq, sums_sums_sq_ptr, mBlocks, 2*bn);
  LIBXSMM_VLA_DECL(2, float, mean, mean_data, bn);
  LIBXSMM_VLA_DECL(2, float, rstd, rstd_data, bn);
  LIBXSMM_VLA_DECL(2, float, gamma, gamma_data, bm);
  LIBXSMM_VLA_DECL(2, float, beta, beta_data, bm);
  LIBXSMM_VLA_DECL(2, float, aux_bias, aux_bias_ptr, bn);
  LIBXSMM_VLA_DECL(4, float, X, data_in, mBlocks, bn, bm);
  /*libxsmm_barrier *barrier;*/
  /* Generate JITED kernels for optimized code */
  unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD;
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS;
  reduce_rows_kernel = libxsmm_dispatch_meltw_unary(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD;
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
  _ld = 2*bn;
  reduce_cols_kernel = libxsmm_dispatch_meltw_unary(bn, mBlocks, &_ld, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  jit_scale_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS | LIBXSMM_MELTW_FLAG_SCALE_MULT;
  scale_kernel = libxsmm_dispatch_meltw_scale(bn, 1, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scale_flags, 0);
  jit_scaleout_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS_COLS | LIBXSMM_MELTW_FLAG_SCALE_MULT | LIBXSMM_MELTW_FLAG_SCALE_ADD_BIAS;
  scaleout_kernel = libxsmm_dispatch_meltw_scale(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scaleout_flags, 0);
#if defined(_OPENMP)
# pragma omp parallel
#endif
  {
    int i, imin, im, in;
    float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
    __m512 minus_ones = _mm512_set1_ps(-1.f);
#endif
#if defined(_OPENMP)
    const int ltid = omp_get_thread_num();
#else
    const int ltid = 0;
#endif
    /* static partition of the (nBlocks x mBlocks) block grid and of the
     * nBlocks column-block range across threads */
    const int work_mn = nBlocks * mBlocks;
    const int chunksize_mn = (work_mn % threads == 0) ? (work_mn /threads) : ((work_mn / threads) + 1);
    const int thr_begin_mn = (ltid * chunksize_mn < work_mn) ? (ltid * chunksize_mn) : work_mn;
    const int thr_end_mn = ((ltid + 1) * chunksize_mn < work_mn) ? ((ltid + 1) * chunksize_mn) : work_mn;
    const int work_n = nBlocks;
    const int chunksize_n = (work_n % threads == 0) ? (work_n /threads) : ((work_n / threads) + 1);
    const int thr_begin_n = (ltid * chunksize_n < work_n) ? (ltid * chunksize_n) : work_n;
    const int thr_end_n = ((ltid + 1) * chunksize_n < work_n) ? ((ltid + 1) * chunksize_n) : work_n;
    libxsmm_meltw_unary_param reduce_rows_params, reduce_cols_params;
    libxsmm_meltw_scale_param scale_params;
    libxsmm_meltw_scale_param scaleout_params;
    /*libxsmm_barrier_init(barrier, ltid);*/
    /* stage 1: per-block partial sums and sums of squares */
    for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
      in = imin / mBlocks;
      im = imin % mBlocks;
      reduce_rows_params.in.primary = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm);
      reduce_rows_params.out.primary = &LIBXSMM_VLA_ACCESS(3, sums_sums_sq, in, im, 0, mBlocks, 2*bn);
      reduce_rows_kernel(&reduce_rows_params);
    }
#pragma omp barrier
    /*libxsmm_barrier_wait(barrier, ltid);*/
    /* stage 2: reduce partials over mBlocks and scale by 1/m -> E[x], E[x^2] */
    scale_params.scale_vals_ptr = &reverse_m;
    for (in = thr_begin_n; in < thr_end_n; in++) {
      reduce_cols_params.in.primary = &LIBXSMM_VLA_ACCESS(3, sums_sums_sq, in, 0, 0, mBlocks, 2*bn);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
      reduce_cols_kernel(&reduce_cols_params);
      scale_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
      scale_params.out_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
      scale_kernel(&scale_params);
      reduce_cols_params.in.primary = &LIBXSMM_VLA_ACCESS(3, sums_sums_sq, in, 0, bn, mBlocks, 2*bn);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
      reduce_cols_kernel(&reduce_cols_params);
      scale_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
      scale_params.out_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
      scale_kernel(&scale_params);
    }
#pragma omp barrier
    /*libxsmm_barrier_wait(barrier, ltid);*/
    /* Calculate rstd and auxiliary bias vectors*/
    for (in = thr_begin_n; in < thr_end_n; in++) {
      float *rstd_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
      float *mean_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
      float *bias_ptr = &LIBXSMM_VLA_ACCESS(2, aux_bias, in, 0, bn);
#if defined(__AVX512F__)
      for (i = 0; i < bn-15; i+= 16) {
        __m512 vrstd = _mm512_loadu_ps(rstd_ptr+i);
        __m512 vmean = _mm512_loadu_ps(mean_ptr+i);
        vrstd = _mm512_rsqrt14_ps(_mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
        _mm512_storeu_ps(rstd_ptr+i, vrstd);
        _mm512_storeu_ps(bias_ptr+i, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
      }
      if (i < bn) {
        int rem = bn - i;
        __mmask16 mask = (1 << rem) - 1;
        __m512 vrstd = _mm512_maskz_loadu_ps(mask, rstd_ptr+i);
        __m512 vmean = _mm512_maskz_loadu_ps(mask, mean_ptr+i);
        vrstd = _mm512_maskz_rsqrt14_ps(mask, _mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
        _mm512_mask_storeu_ps(rstd_ptr+i, mask, vrstd );
        _mm512_mask_storeu_ps(bias_ptr+i, mask, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
      }
#else
      for (i = 0; i < bn; i++) {
        rstd_ptr[i] = (float)(1.0 / sqrt(rstd_ptr[i] - mean_ptr[i] * mean_ptr[i]));
        /* bug fix: bias is -mean*rstd (matches the AVX-512 path above and the
         * reference implementation); it previously computed -mean*mean. */
        bias_ptr[i] = -1.f * mean_ptr[i] * rstd_ptr[i];
      }
#endif
    }
#pragma omp barrier
    /*libxsmm_barrier_wait(barrier, ltid);*/
    /* stage 3: X = (X * rstd + bias) * gamma + beta, block by block */
    for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
      in = imin / mBlocks;
      im = imin % mBlocks;
      scaleout_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm);
      scaleout_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm);
      scaleout_params.scale_vals_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
      scaleout_params.bias_vals_ptr = &LIBXSMM_VLA_ACCESS(2, aux_bias, in, 0, bn);
      scaleout_params.scale_vals_ptr2 = &LIBXSMM_VLA_ACCESS(2, gamma, im, 0, bm);
      scaleout_params.bias_vals_ptr2 = &LIBXSMM_VLA_ACCESS(2, beta, im, 0, bm);
      scaleout_kernel(&scaleout_params);
    }
#pragma omp barrier
    /*libxsmm_barrier_wait(barrier, ltid);*/
  }
  libxsmm_free(scratch);
}
#endif
LIBXSMM_INLINE
/* Blocked, threaded backward pass of layernorm over NCNC-blocked data.
 * Produces dX, dgamma and dbeta from upstream gradients dY, the saved
 * forward statistics (mean, rstd) and gamma.
 * NOTE(review): the per-block compute sections below are guarded by
 * '#if defined(__AVX512F__)' with no scalar fallback -- on non-AVX512 builds
 * the tmp blocks and dX are never written.  Confirm AVX-512 is a hard build
 * requirement for this path. */
void optimized_blocked_layernorm_bwd(int m, int n, int bm, int bn, float *_dY, float *_X, float *_mean, float *_rstd, float *_gamma, float *_dX, float *_dgamma, float *_dbeta)
{
  int ld = bm, ld_vector = bn;
  libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  libxsmm_meltw_unary_type unary_type;
  libxsmm_meltwfunction_unary reduce_rows_kernel, reduce_cols_kernel, reduce_cols_kernel2, reduce_cols_kernel3;
  int nBlocks = n/bn;
  int mBlocks = m/bm;
  /* scratch layout (floats):
   *   [m*nBlocks] dgamma partials, [m*nBlocks] dbeta partials,
   *   [n*mBlocks] ds partials,     [n*mBlocks] db partials,
   *   [n] db,                      [n] ds */
  float *const scratch = (float*)libxsmm_aligned_scratch((2 * n * mBlocks + 2 * m * nBlocks + 2 * n) * sizeof(float), 0/*auto-alignment*/);
  float *dgamma_aux_ptr = scratch;
  float *dbeta_aux_ptr = scratch + m * nBlocks;
  float *ds_aux_ptr = scratch + 2 * m * nBlocks;
  float *db_aux_ptr = scratch + 2 * m * nBlocks + n * mBlocks;
  float *db_ptr = scratch + 2 * m * nBlocks + 2 * n * mBlocks;
  float *ds_ptr = scratch + 2 * m * nBlocks + 2 * n * mBlocks + n;
  LIBXSMM_VLA_DECL(3, float, ds_aux, ds_aux_ptr, mBlocks, bn);
  LIBXSMM_VLA_DECL(3, float, db_aux, db_aux_ptr, mBlocks, bn);
  LIBXSMM_VLA_DECL(3, float, dgamma_aux, dgamma_aux_ptr, nBlocks, bm);
  LIBXSMM_VLA_DECL(3, float, dbeta_aux, dbeta_aux_ptr, nBlocks, bm);
  LIBXSMM_VLA_DECL(4, float, dY, _dY, mBlocks, bn, bm);
  LIBXSMM_VLA_DECL(4, float, X, _X, mBlocks, bn, bm);
  LIBXSMM_VLA_DECL(4, float, dX, _dX, mBlocks, bn, bm);
  LIBXSMM_VLA_DECL(2, float, mean, _mean, bn);
  LIBXSMM_VLA_DECL(2, float, rstd, _rstd, bn);
  LIBXSMM_VLA_DECL(2, float, gamma, _gamma, bm);
  LIBXSMM_VLA_DECL(2, float, dgamma, _dgamma, bm);
  LIBXSMM_VLA_DECL(2, float, dbeta, _dbeta, bm);
  LIBXSMM_VLA_DECL(2, float, ds, ds_ptr, bn);
  LIBXSMM_VLA_DECL(2, float, db, db_ptr, bn);
#if defined(_OPENMP)
  int threads = omp_get_max_threads(); /* number of threads */
#else
  int threads = 1; /* number of threads */
#endif
  /* Generate JITED kernels for optimized code */
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS;
  unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD;
  reduce_rows_kernel = libxsmm_dispatch_meltw_unary(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
  reduce_cols_kernel = libxsmm_dispatch_meltw_unary(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  reduce_cols_kernel2 = libxsmm_dispatch_meltw_unary(bm, nBlocks, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  reduce_cols_kernel3 = libxsmm_dispatch_meltw_unary(bn, mBlocks, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  /* per-thread staging area: 3 bm x bn blocks (db, ds, dgamma integrands) */
#if !defined(_OPENMP)
  float *const aux = (float*)libxsmm_aligned_scratch((3 * bm * bn) * sizeof(float), 0/*auto-alignment*/);
#else
  float *const aux = (float*)libxsmm_aligned_scratch((3 * bm * bn) * sizeof(float) * omp_get_max_threads(), 0/*auto-alignment*/);
# pragma omp parallel
#endif
  {
    int imin, im, in, ii, jj;
    float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
    __m512 minus_ones = _mm512_set1_ps(-1.f);
    __m512 scale = _mm512_set1_ps(reverse_m);
#endif
#if defined(_OPENMP)
    const int ltid = omp_get_thread_num();
#else
    const int ltid = 0;
#endif
    /* static partition of block-grid / column-block / row-block work */
    const int work_mn = nBlocks * mBlocks;
    const int chunksize_mn = (work_mn % threads == 0) ? (work_mn /threads) : ((work_mn / threads) + 1);
    const int thr_begin_mn = (ltid * chunksize_mn < work_mn) ? (ltid * chunksize_mn) : work_mn;
    const int thr_end_mn = ((ltid + 1) * chunksize_mn < work_mn) ? ((ltid + 1) * chunksize_mn) : work_mn;
    const int work_n = nBlocks;
    const int chunksize_n = (work_n % threads == 0) ? (work_n /threads) : ((work_n / threads) + 1);
    const int thr_begin_n = (ltid * chunksize_n < work_n) ? (ltid * chunksize_n) : work_n;
    const int thr_end_n = ((ltid + 1) * chunksize_n < work_n) ? ((ltid + 1) * chunksize_n) : work_n;
    const int work_m = mBlocks;
    const int chunksize_m = (work_m % threads == 0) ? (work_m /threads) : ((work_m / threads) + 1);
    const int thr_begin_m = (ltid * chunksize_m < work_m) ? (ltid * chunksize_m) : work_m;
    const int thr_end_m = ((ltid + 1) * chunksize_m < work_m) ? ((ltid + 1) * chunksize_m) : work_m;
    libxsmm_meltw_unary_param reduce_rows_params, reduce_cols_params;
    /* stage 1: build the per-block integrands and reduce them to partials */
    for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
      float *const tmp = aux + bm*bn * (ltid*3 + 0); /* aux block for db */
      float *const tmp2 = aux + bm*bn * (ltid*3 + 1); /* aux block for ds */
      float *const tmp3 = aux + bm*bn * (ltid*3 + 2); /* aux block for dgamma */
      in = imin / mBlocks;
      im = imin % mBlocks;
#if defined(__AVX512F__)
      /* Prepare blocks for reductions */
      for (jj = 0; jj < bn; jj++) {
        __m512 vrstd = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn));
        __m512 vmean = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, mean, in, jj, bn));
        __m512 vb = _mm512_mul_ps(vrstd, _mm512_mul_ps(minus_ones, vmean));
        for (ii = 0; ii < bm-15; ii+=16) {
          __m512 vgamma = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
          __m512 vdY = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vX = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vaux = _mm512_fmadd_ps(vrstd, vX, vb); /* normalized x */
          __m512 vtmp = _mm512_mul_ps(vgamma, vdY);
          _mm512_storeu_ps((float*)tmp+jj*bm+ii, vtmp);
          _mm512_storeu_ps((float*)tmp2+jj*bm+ii, _mm512_mul_ps(vtmp, vX));
          _mm512_storeu_ps((float*)tmp3+jj*bm+ii, _mm512_mul_ps(vdY, vaux));
        }
        if (ii < bm) {
          int rem = bm - ii;
          __mmask16 mask = (1 << rem) - 1;
          __m512 vgamma = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
          __m512 vdY = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vX = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vaux = _mm512_fmadd_ps(vrstd, vX, vb);
          __m512 vtmp = _mm512_mul_ps(vgamma, vdY);
          _mm512_mask_storeu_ps((float*)tmp+jj*bm+ii, mask, vtmp);
          _mm512_mask_storeu_ps((float*)tmp2+jj*bm+ii, mask, _mm512_mul_ps(vtmp, vX));
          _mm512_mask_storeu_ps((float*)tmp3+jj*bm+ii, mask, _mm512_mul_ps(vdY, vaux));
        }
      }
#endif
      /* Now perform reductions */
      reduce_rows_params.in.primary = tmp;
      reduce_rows_params.out.primary = &LIBXSMM_VLA_ACCESS(3, db_aux, in, im, 0, mBlocks, bn);
      reduce_rows_kernel(&reduce_rows_params);
      reduce_rows_params.in.primary = tmp2;
      reduce_rows_params.out.primary = &LIBXSMM_VLA_ACCESS(3, ds_aux, in, im, 0, mBlocks, bn);
      reduce_rows_kernel(&reduce_rows_params);
      reduce_cols_params.in.primary = (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, 0, 0, mBlocks, bn, bm);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_aux, im, in, 0, nBlocks, bm);
      reduce_cols_kernel(&reduce_cols_params);
      reduce_cols_params.in.primary = tmp3;
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_aux, im, in, 0, nBlocks, bm);
      reduce_cols_kernel(&reduce_cols_params);
    }
#pragma omp barrier
    /* Second level of reductions */
    for (in = thr_begin_n; in < thr_end_n; in++) {
      reduce_cols_params.in.primary = &LIBXSMM_VLA_ACCESS(3, db_aux, in, 0, 0, mBlocks, bn);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(2, db, in, 0, bn);
      reduce_cols_kernel3(&reduce_cols_params);
      reduce_cols_params.in.primary = &LIBXSMM_VLA_ACCESS(3, ds_aux, in, 0, 0, mBlocks, bn);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(2, ds, in, 0, bn);
      reduce_cols_kernel3(&reduce_cols_params);
    }
    for (im = thr_begin_m; im < thr_end_m; im++) {
      reduce_cols_params.in.primary = &LIBXSMM_VLA_ACCESS(3, dbeta_aux, im, 0, 0, nBlocks, bm);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, im, 0, bm);
      reduce_cols_kernel2(&reduce_cols_params);
      reduce_cols_params.in.primary = &LIBXSMM_VLA_ACCESS(3, dgamma_aux, im, 0, 0, nBlocks, bm);
      reduce_cols_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, im, 0, bm);
      reduce_cols_kernel2(&reduce_cols_params);
    }
#pragma omp barrier
    /* Calculate auxiliary b/c vectors -- overwritten on db/ds */
    for (in = thr_begin_n; in < thr_end_n; in++) {
#if defined(__AVX512F__)
      for (ii = 0; ii < bn-15; ii+=16) {
        __m512 vmean = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn));
        __m512 vrstd = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn));
        __m512 vdb = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn));
        __m512 vds = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn));
        __m512 ascale = _mm512_mul_ps(vrstd, scale); /* rstd/m */
        __m512 vrstd3 = _mm512_mul_ps(_mm512_mul_ps(vrstd, vrstd), ascale); /* rstd^3/m */
        __m512 vb = _mm512_mul_ps(_mm512_fmsub_ps(vdb, vmean, vds), vrstd3);
        __m512 vc = _mm512_sub_ps(_mm512_mul_ps(_mm512_mul_ps(minus_ones, vb), vmean), _mm512_mul_ps(vdb, ascale));
        _mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn), vb);
        _mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn), vc);
      }
      if (ii < bn) {
        int rem = bn - ii;
        __mmask16 mask = (1 << rem) - 1;
        __m512 vmean = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn));
        __m512 vrstd = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn));
        __m512 vdb = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, db, in, ii, bn));
        __m512 vds = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn));
        __m512 ascale = _mm512_mul_ps(vrstd, scale);
        __m512 vrstd3 = _mm512_mul_ps(_mm512_mul_ps(vrstd, vrstd), ascale);
        __m512 vb = _mm512_mul_ps(_mm512_fmsub_ps(vdb, vmean, vds), vrstd3);
        __m512 vc = _mm512_sub_ps(_mm512_mul_ps(_mm512_mul_ps(minus_ones, vb), vmean), _mm512_mul_ps(vdb, ascale));
        _mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn), mask, vb);
        _mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn), mask, vc);
      }
#endif
    }
#pragma omp barrier
    /* Final computation of dX */
    for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
      in = imin / mBlocks;
      im = imin % mBlocks;
#if defined(__AVX512F__)
      for (jj = 0; jj < bn; jj++) {
        __m512 va = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn));
        __m512 vb = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, db, in, jj, bn));
        __m512 vc = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, ds, in, jj, bn));
        for (ii = 0; ii < bm-15; ii+=16) {
          __m512 vgamma = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
          __m512 vdY = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vX = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vaux1 = _mm512_fmadd_ps(vb, vX, vc);
          __m512 vaux2 = _mm512_mul_ps(va, _mm512_mul_ps(vdY, vgamma));
          _mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm), _mm512_add_ps(vaux1, vaux2));
        }
        if (ii < bm) {
          int rem = bm - ii;
          __mmask16 mask = (1 << rem) - 1;
          __m512 vgamma = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
          __m512 vdY = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vX = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
          __m512 vaux1 = _mm512_fmadd_ps(vb, vX, vc);
          __m512 vaux2 = _mm512_mul_ps(va, _mm512_mul_ps(vdY, vgamma));
          _mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm), mask, _mm512_add_ps(vaux1, vaux2));
        }
      }
#endif
    }
#pragma omp barrier
  }
  libxsmm_free(scratch);
  libxsmm_free(aux);
}
int main(int argc, char* argv[])
{
unsigned int m = 64, n = 64, iters = 10000, k = 0;
libxsmm_blasint ld_in = 64, ld_vector = 64, block_size = 64;
float *sinp, *gamma, *beta, *sout, *sout_nc, *mean_data, *rstd_data, *sout_ref, *mean_data_ref, *rstd_data_ref, *bias_aux, *mean_rstd_data;
float *dY_ref, *X_ref, *mean_ref, *rstd_ref, *gamma_ref, *dX_ref, *dgamma_ref, *dbeta_ref;
float *dY_bwd, *X_bwd, *dX_bwd, *dgamma_bwd, *dbeta_bwd, *dX_bwd_nc;
libxsmm_matdiff_info norms_out, norms_mean, norms_rstd, norms_dx, norms_dbeta, norms_dgamma;
unsigned long long l_start, l_end;
double l_total = 0, l_total2 = 0;
#if 0
libxsmm_meltw_redu_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_NONE;
libxsmm_meltwfunction_reduce reduce_kernel;
#endif
libxsmm_meltw_scal_flags jit_scalemean_flags = 0;
libxsmm_meltwfunction_scale scalemean_kernel;
libxsmm_meltw_scal_flags jit_scaleout_flags = 0;
libxsmm_meltwfunction_scale scaleout_kernel;
libxsmm_init();
libxsmm_matdiff_clear(&norms_out);
libxsmm_matdiff_clear(&norms_mean);
libxsmm_matdiff_clear(&norms_rstd);
libxsmm_matdiff_clear(&norms_dx);
libxsmm_matdiff_clear(&norms_dbeta);
libxsmm_matdiff_clear(&norms_dgamma);
if ( argc > 1 ) m = atoi(argv[1]);
if ( argc > 2 ) n = atoi(argv[2]);
if ( argc > 3 ) iters = atoi(argv[3]);
if ( argc > 4 ) block_size = atoi(argv[4]);
libxsmm_init();
ld_in = m;
n = LIBXSMM_MAX(n,1);
ld_vector = n;
ld_in = LIBXSMM_MAX(ld_in,(libxsmm_blasint)m);
/* Allocate arrays */
sinp = (float*) malloc(ld_in*n*sizeof(float));
gamma = (float*) malloc(m*sizeof(float) );
beta = (float*) malloc(m*sizeof(float) );
sout = (float*) malloc(ld_in*n*sizeof(float) );
sout_nc = (float*) malloc(ld_in*n*sizeof(float) );
mean_rstd_data = (float*) malloc(2*n*sizeof(float) );
mean_data = (float*) mean_rstd_data;
rstd_data = (float*) mean_rstd_data + n;
dY_ref = (float*) malloc(m*n*sizeof(float));
dY_bwd = (float*) malloc(m*n*sizeof(float));
X_ref = (float*) malloc(m*n*sizeof(float));
X_bwd = (float*) malloc(m*n*sizeof(float));
mean_ref = (float*) malloc(n*sizeof(float));
rstd_ref = (float*) malloc(n*sizeof(float));
gamma_ref = (float*) malloc(m*sizeof(float));
dX_ref = (float*) malloc(m*n*sizeof(float));
dX_bwd = (float*) malloc(m*n*sizeof(float));
dX_bwd_nc = (float*) malloc(m*n*sizeof(float));
dgamma_ref= (float*) malloc(m*sizeof(float));
dgamma_bwd= (float*) malloc(m*sizeof(float));
dbeta_ref = (float*) malloc(m*sizeof(float));
dbeta_bwd = (float*) malloc(m*sizeof(float));
/* Allocate reference arrays */
mean_data_ref = (float*) malloc(n*sizeof(float) );
rstd_data_ref = (float*) malloc(n*sizeof(float) );
sout_ref = (float*) malloc(ld_in*n*sizeof(float) );
/* Allocate auxiliary arrays for optimized version */
bias_aux = (float*) malloc(n*sizeof(float) );
/* Fill matrices with random data */
sfill_matrix ( sinp, ld_in, m, n );
sfill_matrix ( gamma, ld_in, m, 1 );
sfill_matrix ( beta, ld_in, m, 1 );
sfill_matrix ( dY_ref, ld_in, m, n );
matrix_copy_NC_to_NCNC( dY_ref, dY_bwd, 1, n, m, block_size, block_size );
sfill_matrix ( X_ref, ld_in, m, n );
matrix_copy_NC_to_NCNC( X_ref, X_bwd, 1, n, m, block_size, block_size );
sfill_matrix ( mean_ref, n, n, 1 );
sfill_matrix ( rstd_ref, n, n, 1 );
sfill_matrix ( gamma_ref, m, m, 1 );
/* Calculate reference results... */
naive_layernorm(m, n, ld_in, sinp, gamma, beta, sout_ref, mean_data_ref, rstd_data_ref);
naive_layernorm_bwd(m, n, ld_in, dY_ref, X_ref, mean_ref, rstd_ref, gamma_ref, dX_ref, dgamma_ref, dbeta_ref);
#if 0
/* Generate JITED kernels for optimized code */
jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS | LIBXSMM_MELTW_FLAG_REDUCE_ELTS_SQUARED;
printf("JITing reduce kernel... \n");
reduce_kernel = libxsmm_dispatch_meltw_reduce(m, n, &ld_in, &ld_in, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
jit_scalemean_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS | LIBXSMM_MELTW_FLAG_SCALE_MULT;
printf("JITing mean-scale kernel... \n");
scalemean_kernel = libxsmm_dispatch_meltw_scale(n, 1, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scalemean_flags, 0);
jit_scaleout_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS_COLS | LIBXSMM_MELTW_FLAG_SCALE_MULT | LIBXSMM_MELTW_FLAG_SCALE_ADD_BIAS;
printf("JITing scaling kernel for output... \n");
scaleout_kernel = libxsmm_dispatch_meltw_scale(m, n, &ld_in, &ld_in, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scaleout_flags, 0);
#endif
/* Calculate blocked results... */
#if 0
optimized_layernorm(m, n, ld_in, sinp, gamma, beta, sout, mean_data, rstd_data, reduce_kernel, scalemean_kernel, scaleout_kernel, bias_aux);
#else
matrix_copy_NC_to_NCNC( sinp, sout, 1, n, m, block_size, block_size );
optimized_blocked_layernorm(m, n, block_size, block_size, sout, gamma, beta, mean_data, rstd_data);
matrix_copy_NCNC_to_NC( sout, sout_nc, 1, n, m, block_size, block_size );
optimized_blocked_layernorm_bwd(m, n, block_size, block_size, dY_bwd, X_bwd, mean_ref, rstd_ref, gamma_ref, dX_bwd, dgamma_bwd, dbeta_bwd);
matrix_copy_NCNC_to_NC( dX_bwd, dX_bwd_nc, 1, n, m, block_size, block_size );
#endif
/* compare */
printf("##########################################\n");
printf("# Correctness FWD - Output #\n");
printf("##########################################\n");
#if 0
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, ld_in*n, 1, sout_ref, sout, 0, 0);
#else
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, ld_in*n, 1, sout_ref, sout_nc, 0, 0);
#endif
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness FWD - Mean #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_mean, LIBXSMM_DATATYPE_F32, n, 1, mean_data_ref, mean_data, 0, 0);
printf("L1 reference : %.25g\n", norms_mean.l1_ref);
printf("L1 test : %.25g\n", norms_mean.l1_tst);
printf("L2 abs.error : %.24f\n", norms_mean.l2_abs);
printf("L2 rel.error : %.24f\n", norms_mean.l2_rel);
printf("Linf abs.error: %.24f\n", norms_mean.linf_abs);
printf("Linf rel.error: %.24f\n", norms_mean.linf_rel);
printf("Check-norm : %.24f\n\n", norms_mean.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness FWD - Rstd #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_rstd, LIBXSMM_DATATYPE_F32, n, 1, rstd_data_ref, rstd_data, 0, 0);
printf("L1 reference : %.25g\n", norms_rstd.l1_ref);
printf("L1 test : %.25g\n", norms_rstd.l1_tst);
printf("L2 abs.error : %.24f\n", norms_rstd.l2_abs);
printf("L2 rel.error : %.24f\n", norms_rstd.l2_rel);
printf("Linf abs.error: %.24f\n", norms_rstd.linf_abs);
printf("Linf rel.error: %.24f\n", norms_rstd.linf_rel);
printf("Check-norm : %.24f\n\n", norms_rstd.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness BWD - dX #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_dx, LIBXSMM_DATATYPE_F32, ld_in*n, 1, dX_ref, dX_bwd_nc, 0, 0);
printf("L1 reference : %.25g\n", norms_dx.l1_ref);
printf("L1 test : %.25g\n", norms_dx.l1_tst);
printf("L2 abs.error : %.24f\n", norms_dx.l2_abs);
printf("L2 rel.error : %.24f\n", norms_dx.l2_rel);
printf("Linf abs.error: %.24f\n", norms_dx.linf_abs);
printf("Linf rel.error: %.24f\n", norms_dx.linf_rel);
printf("Check-norm : %.24f\n\n", norms_dx.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness BWD - dbeta #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_dbeta, LIBXSMM_DATATYPE_F32, m, 1, dbeta_ref, dbeta_bwd, 0, 0);
printf("L1 reference : %.25g\n", norms_dbeta.l1_ref);
printf("L1 test : %.25g\n", norms_dbeta.l1_tst);
printf("L2 abs.error : %.24f\n", norms_dbeta.l2_abs);
printf("L2 rel.error : %.24f\n", norms_dbeta.l2_rel);
printf("Linf abs.error: %.24f\n", norms_dbeta.linf_abs);
printf("Linf rel.error: %.24f\n", norms_dbeta.linf_rel);
printf("Check-norm : %.24f\n\n", norms_dbeta.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness BWD - dgamma #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_dgamma, LIBXSMM_DATATYPE_F32, m, 1, dgamma_ref, dgamma_bwd, 0, 0);
printf("L1 reference : %.25g\n", norms_dgamma.l1_ref);
printf("L1 test : %.25g\n", norms_dgamma.l1_tst);
printf("L2 abs.error : %.24f\n", norms_dgamma.l2_abs);
printf("L2 rel.error : %.24f\n", norms_dgamma.l2_rel);
printf("Linf abs.error: %.24f\n", norms_dgamma.linf_abs);
printf("Linf rel.error: %.24f\n", norms_dgamma.linf_rel);
printf("Check-norm : %.24f\n\n", norms_dgamma.normf_rel);
l_start = libxsmm_timer_tick();
/* Calculate reference results... */
for (k = 0; k < iters; k++) {
naive_layernorm(m, n, ld_in, sinp, gamma, beta, sout_ref, mean_data_ref, rstd_data_ref);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Reference fwd time = %.5g\n", ((double)(l_total)));
l_start = libxsmm_timer_tick();
for (k = 0; k < iters; k++) {
#if 1
optimized_blocked_layernorm(m, n, block_size, block_size, sout, gamma, beta, mean_data, rstd_data);
#else
optimized_layernorm(m, n, ld_in, sinp, gamma, beta, sout, mean_data, rstd_data, reduce_kernel, scalemean_kernel, scaleout_kernel, bias_aux);
#endif
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("Optimized fwd time = %.5g\n", ((double)(l_total2)));
printf("Speedup fwd is = %.5g\n", ((double)(l_total/l_total2)));
l_start = libxsmm_timer_tick();
/* Calculate reference results... */
for (k = 0; k < iters; k++) {
naive_layernorm_bwd(m, n, ld_in, dY_ref, X_ref, mean_ref, rstd_ref, gamma_ref, dX_ref, dgamma_ref, dbeta_ref);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Reference bwd time = %.5g\n", ((double)(l_total)));
l_start = libxsmm_timer_tick();
for (k = 0; k < iters; k++) {
optimized_blocked_layernorm_bwd(m, n, block_size, block_size, dY_bwd, X_bwd, mean_ref, rstd_ref, gamma_ref, dX_bwd, dgamma_bwd, dbeta_bwd);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("Optimized bwd time = %.5g\n", ((double)(l_total2)));
printf("Speedup bwd is = %.5g\n", ((double)(l_total/l_total2)));
/* Free allocated arrays */
free(sinp);
free(gamma);
free(beta);
free(sout);
free(mean_rstd_data);
free(mean_data_ref);
free(rstd_data_ref);
free(sout_ref);
free(bias_aux);
free(dY_ref);
free(X_ref);
free(mean_ref);
free(rstd_ref);
free(gamma_ref);
free(dX_ref);
free(dgamma_ref);
free(dbeta_ref);
free(dY_bwd);
free(X_bwd);
free(dX_bwd);
free(dgamma_bwd);
free(dbeta_bwd);
free(dX_bwd_nc);
return EXIT_SUCCESS;
}
|
GB_unaryop__abs_uint64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint16
// op(A') function: GB_tran__abs_uint64_uint16
// C type: uint64_t
// A type: uint16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = (uint64_t) Ax [p] for all
// p in [0, anz).  For ABS on an unsigned source type the operator is the
// identity, so only the uint16_t -> uint64_t cast is performed (see the
// GB_OP / GB_CASTING macros above).
// Cx       [out] result array, anz entries
// Ax       [in]  input array, anz entries
// anz      number of entries to process
// nthreads OpenMP thread count for the parallel loop
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.
GrB_Info GB_unop__abs_uint64_uint16
(
uint64_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// embarrassingly parallel: each p is independent, static schedule is ideal
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint16_t -> uint64_t, and apply
// the (identity) ABS operator.  The actual loop lives in the shared template
// GB_unaryop_transpose.c, which is specialized here via the GB_* macros
// defined above; this is phase 2 of the two-phase transpose (Rowcounts was
// computed in phase 1).
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_tran__abs_uint64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
exercicio02_full.c | #include <stdio.h>
#include <stdlib.h>
#include "omp.h"
static long num_steps = 100000000;
/*
 * Estimate pi by the midpoint rule on the integral of 4/(1+x^2) over [0,1],
 * distributing iterations cyclically across OpenMP threads (SPMD style).
 * Prints each thread id, the resulting pi, and the elapsed wall time.
 */
int main() {
    double pi, sum = 0.0, tempo_inicial, tempo_final, step;
    int nthreads = 4;

    step = 1.0 / (double)num_steps;
    omp_set_num_threads(nthreads);

    tempo_inicial = omp_get_wtime();
    #pragma omp parallel
    {
        int id = omp_get_thread_num();
        /* BUG FIX: omp_set_num_threads() is only a request -- the runtime
         * may grant fewer threads.  The original code strode by the
         * requested count (4), so with a smaller team some iterations were
         * never executed and pi came out wrong.  Stride by the actual team
         * size instead. */
        int nthrds = omp_get_num_threads();
        double local_sum = 0.0, x;
        long i;  /* long: matches the type of num_steps */
        printf("thread id: %d\n", id);
        /* cyclic distribution: thread id handles i = id, id+nthrds, ... */
        for (i = id; i < num_steps; i = i + nthrds) {
            x = (i + 0.5) * step;
            local_sum = local_sum + 4.0 / (1.0 + x * x);
        }
        /* serialize the reduction into the shared accumulator */
        #pragma omp critical
        {
            sum = sum + local_sum;
        }
    }
    pi = step * sum;
    tempo_final = omp_get_wtime();
    printf("\nPi = %lf", pi);
    printf("\nTempo gasto: %lf\n", tempo_final - tempo_inicial);
    return 0;
}
|
convolution_3x3_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform a raw 3x3 convolution kernel into Winograd F(6x6,3x3) domain
// (8x8 per input/output channel pair) and interleave it for the pack4-to-1
// SSE compute kernel: groups of 4 output channels x 4 input channels are
// packed contiguously per one of the 64 transform positions.
// NOTE(review): the interleave loops assume inch is a multiple of 4 --
// TODO confirm with the caller's packing setup.
static void conv3x3s1_winograd64_transform_kernel_pack4to1_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
// G matrix of the Winograd kernel transform: tm = G * g * G^T (g is 3x3)
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h : tmp = g^T * G^T (one 8x3 intermediate per 3x3 kernel)
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v : apply G on the other side, producing the full 8x8 transform
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 4a-inch/4a-64-outch;
kernel_tm_pack4.create(4 * inch / 4, 64, outch / 4 + outch % 4, (size_t)4u * 4, 4);
int p = 0;
// main case: pack 4 output channels together, 16 floats per (4-in x 4-out)
for (; p + 3 < outch; p += 4)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
Mat g0 = kernel_tm_pack4.channel(p / 4);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int q = 0; q + 3 < inch; q += 4)
{
const float* k00 = k0.row(q);
const float* k01 = k0.row(q + 1);
const float* k02 = k0.row(q + 2);
const float* k03 = k0.row(q + 3);
const float* k10 = k1.row(q);
const float* k11 = k1.row(q + 1);
const float* k12 = k1.row(q + 2);
const float* k13 = k1.row(q + 3);
const float* k20 = k2.row(q);
const float* k21 = k2.row(q + 1);
const float* k22 = k2.row(q + 2);
const float* k23 = k2.row(q + 3);
const float* k30 = k3.row(q);
const float* k31 = k3.row(q + 1);
const float* k32 = k3.row(q + 2);
const float* k33 = k3.row(q + 3);
// out-channel index varies fastest, then in-channel
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k01[k];
g00[5] = k11[k];
g00[6] = k21[k];
g00[7] = k31[k];
g00[8] = k02[k];
g00[9] = k12[k];
g00[10] = k22[k];
g00[11] = k32[k];
g00[12] = k03[k];
g00[13] = k13[k];
g00[14] = k23[k];
g00[15] = k33[k];
g00 += 16;
}
}
}
// tail: remaining output channels one at a time (4 floats per 4 in-channels)
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
Mat g0 = kernel_tm_pack4.channel(p / 4 + p % 4);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int q = 0; q + 3 < inch; q += 4)
{
const float* k00 = k0.row(q);
const float* k01 = k0.row(q + 1);
const float* k02 = k0.row(q + 2);
const float* k03 = k0.row(q + 3);
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k02[k];
g00[3] = k03[k];
g00 += 4;
}
}
}
}
// Winograd F(6x6,3x3) convolution, stride 1, input packed 4 floats/channel
// (elempack 4) producing unpacked (elempack 1) output.  Three stages:
// 1) transform 8x8 input tiles into Winograd domain, 2) per-position GEMM
// against the pre-transformed kernel (see transform_kernel above),
// 3) inverse-transform 6x6 output tiles and add bias.
static void conv3x3s1_winograd64_pack4to1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2: each 8x8 input tile yields a 6x6 output tile (overlap 2)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
#ifdef _MSC_VER
__declspec(align(16))
#else
__attribute__((aligned(16)))
#endif
float tmp[8][8][4];
__m128 _v5_25 = _mm_set1_ps(5.25f);
__m128 _vm4_25 = _mm_set1_ps(-4.25f);
__m128 _vm1_25 = _mm_set1_ps(-1.25f);
__m128 _v0_25 = _mm_set1_ps(0.25f);
__m128 _vm2_5 = _mm_set1_ps(-2.5f);
__m128 _v0_5 = _mm_set1_ps(0.5f);
__m128 _v2 = _mm_set1_ps(2.f);
__m128 _v4 = _mm_set1_ps(4.f);
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
// first pass: transform the 8 rows of the tile into tmp
for (int m = 0; m < 8; m++)
{
__m128 _r00 = _mm_load_ps(r0);
__m128 _r01 = _mm_load_ps(r0 + 4);
__m128 _r02 = _mm_load_ps(r0 + 4 * 2);
__m128 _r03 = _mm_load_ps(r0 + 4 * 3);
__m128 _r04 = _mm_load_ps(r0 + 4 * 4);
__m128 _r05 = _mm_load_ps(r0 + 4 * 5);
__m128 _r06 = _mm_load_ps(r0 + 4 * 6);
__m128 _r07 = _mm_load_ps(r0 + 4 * 7);
__m128 _tmp0m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r04, _r02), _mm_sub_ps(_r00, _r06));
__m128 _tmp7m = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_r03, _r05), _mm_sub_ps(_r07, _r01));
_mm_store_ps(tmp[0][m], _tmp0m);
_mm_store_ps(tmp[7][m], _tmp7m);
__m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _r04, _mm_add_ps(_r02, _r06));
__m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _r03, _mm_add_ps(_r01, _r05));
__m128 _tmp1m = _mm_add_ps(_tmp12a, _tmp12b);
__m128 _tmp2m = _mm_sub_ps(_tmp12a, _tmp12b);
_mm_store_ps(tmp[1][m], _tmp1m);
_mm_store_ps(tmp[2][m], _tmp2m);
__m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _r04, _mm_comp_fmadd_ps(_v0_25, _r02, _r06));
__m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v0_5)));
__m128 _tmp3m = _mm_add_ps(_tmp34a, _tmp34b);
__m128 _tmp4m = _mm_sub_ps(_tmp34a, _tmp34b);
_mm_store_ps(tmp[3][m], _tmp3m);
_mm_store_ps(tmp[4][m], _tmp4m);
__m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _r04, _r02), _r06);
__m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _r05, _mm_comp_fmadd_ps(_vm2_5, _r03, _mm_mul_ps(_r01, _v2)));
__m128 _tmp5m = _mm_add_ps(_tmp56a, _tmp56b);
__m128 _tmp6m = _mm_sub_ps(_tmp56a, _tmp56b);
_mm_store_ps(tmp[5][m], _tmp5m);
_mm_store_ps(tmp[6][m], _tmp6m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;
// second pass: same transform applied column-wise on tmp
for (int m = 0; m < 8; m++)
{
__m128 _tmp00 = _mm_load_ps(tmp[m][0]);
__m128 _tmp01 = _mm_load_ps(tmp[m][1]);
__m128 _tmp02 = _mm_load_ps(tmp[m][2]);
__m128 _tmp03 = _mm_load_ps(tmp[m][3]);
__m128 _tmp04 = _mm_load_ps(tmp[m][4]);
__m128 _tmp05 = _mm_load_ps(tmp[m][5]);
__m128 _tmp06 = _mm_load_ps(tmp[m][6]);
__m128 _tmp07 = _mm_load_ps(tmp[m][7]);
__m128 _r0tm0 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp04, _tmp02), _mm_sub_ps(_tmp00, _tmp06));
__m128 _r0tm7 = _mm_comp_fmadd_ps(_v5_25, _mm_sub_ps(_tmp03, _tmp05), _mm_sub_ps(_tmp07, _tmp01));
__m128 _tmp12a = _mm_comp_fmadd_ps(_vm4_25, _tmp04, _mm_add_ps(_tmp02, _tmp06));
__m128 _tmp12b = _mm_comp_fmadd_ps(_vm4_25, _tmp03, _mm_add_ps(_tmp01, _tmp05));
__m128 _r0tm1 = _mm_add_ps(_tmp12a, _tmp12b);
__m128 _r0tm2 = _mm_sub_ps(_tmp12a, _tmp12b);
__m128 _tmp34a = _mm_comp_fmadd_ps(_vm1_25, _tmp04, _mm_comp_fmadd_ps(_v0_25, _tmp02, _tmp06));
__m128 _tmp34b = _mm_comp_fmadd_ps(_v2, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v0_5)));
__m128 _r0tm3 = _mm_add_ps(_tmp34a, _tmp34b);
__m128 _r0tm4 = _mm_sub_ps(_tmp34a, _tmp34b);
__m128 _tmp56a = _mm_comp_fmadd_ps(_v4, _mm_comp_fmadd_ps(_vm1_25, _tmp04, _tmp02), _tmp06);
__m128 _tmp56b = _mm_comp_fmadd_ps(_v0_5, _tmp05, _mm_comp_fmadd_ps(_vm2_5, _tmp03, _mm_mul_ps(_tmp01, _v2)));
__m128 _r0tm5 = _mm_add_ps(_tmp56a, _tmp56b);
__m128 _r0tm6 = _mm_sub_ps(_tmp56a, _tmp56b);
_mm_store_ps(r0_tm_0, _r0tm0);
_mm_store_ps(r0_tm_1, _r0tm1);
_mm_store_ps(r0_tm_2, _r0tm2);
_mm_store_ps(r0_tm_3, _r0tm3);
_mm_store_ps(r0_tm_4, _r0tm4);
_mm_store_ps(r0_tm_5, _r0tm5);
_mm_store_ps(r0_tm_6, _r0tm6);
_mm_store_ps(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 4 * 8;
r0_tm_1 += tiles * 4 * 8;
r0_tm_2 += tiles * 4 * 8;
r0_tm_3 += tiles * 4 * 8;
r0_tm_4 += tiles * 4 * 8;
r0_tm_5 += tiles * 4 * 8;
r0_tm_6 += tiles * 4 * 8;
r0_tm_7 += tiles * 4 * 8;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute: regroup transformed tiles into batches of 8/4/1 for the GEMM
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x8
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
__m128 _r4 = _mm_load_ps(r0 + 4 * 4);
__m128 _r5 = _mm_load_ps(r0 + 4 * 5);
__m128 _r6 = _mm_load_ps(r0 + 4 * 6);
__m128 _r7 = _mm_load_ps(r0 + 4 * 7);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r4);
_mm_store_ps(tmpptr + 4 * 2, _r1);
_mm_store_ps(tmpptr + 4 * 3, _r5);
_mm_store_ps(tmpptr + 4 * 4, _r2);
_mm_store_ps(tmpptr + 4 * 5, _r6);
_mm_store_ps(tmpptr + 4 * 6, _r3);
_mm_store_ps(tmpptr + 4 * 7, _r7);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 32;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 8 + (i % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
// transpose 4x4
__m128 _r0 = _mm_load_ps(r0);
__m128 _r1 = _mm_load_ps(r0 + 4);
__m128 _r2 = _mm_load_ps(r0 + 4 * 2);
__m128 _r3 = _mm_load_ps(r0 + 4 * 3);
_MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
_mm_store_ps(tmpptr, _r0);
_mm_store_ps(tmpptr + 4, _r1);
_mm_store_ps(tmpptr + 4 * 2, _r2);
_mm_store_ps(tmpptr + 4 * 3, _r3);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
__m128 _val = _mm_load_ps(r0);
_mm_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 4;
tmpptr += 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);
// main GEMM: 4 output channels at a time against the packed kernel
int nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
const Mat kernel01_tm = kernel_tm.channel(p / 4);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* kptr = kernel01_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _val0 = _mm_load_ps(r0);
__m128 _val1 = _mm_load_ps(r0 + 4);
__m128 _w0 = _mm_load1_ps(kptr);
__m128 _w1 = _mm_load1_ps(kptr + 1);
__m128 _w2 = _mm_load1_ps(kptr + 2);
__m128 _w3 = _mm_load1_ps(kptr + 3);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val0, _w1, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val1, _w1, _sum3);
_sum4 = _mm_comp_fmadd_ps(_val0, _w2, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val1, _w2, _sum5);
_sum6 = _mm_comp_fmadd_ps(_val0, _w3, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val1, _w3, _sum7);
r0 += 8;
kptr += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output0_tm + 4, _sum1);
_mm_storeu_ps(output1_tm, _sum2);
_mm_storeu_ps(output1_tm + 4, _sum3);
_mm_storeu_ps(output2_tm, _sum4);
_mm_storeu_ps(output2_tm + 4, _sum5);
_mm_storeu_ps(output3_tm, _sum6);
_mm_storeu_ps(output3_tm + 4, _sum7);
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _val0 = _mm_load_ps(r0);
__m128 _w0 = _mm_load1_ps(kptr);
__m128 _w1 = _mm_load1_ps(kptr + 1);
__m128 _w2 = _mm_load1_ps(kptr + 2);
__m128 _w3 = _mm_load1_ps(kptr + 3);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val0, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val0, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val0, _w3, _sum3);
r0 += 4;
kptr += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _val0 = _mm_load1_ps(r0);
__m128 _w0 = _mm_load_ps(kptr);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
r0 += 1;
kptr += 4;
}
float sum[4];
_mm_storeu_ps(sum, _sum);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
// tail GEMM: leftover output channels one at a time
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* kptr = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _val0 = _mm_load_ps(r0);
__m128 _val1 = _mm_load_ps(r0 + 4);
__m128 _w0 = _mm_load1_ps(kptr);
_sum0 = _mm_comp_fmadd_ps(_w0, _val0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_w0, _val1, _sum1);
r0 += 8;
kptr += 1;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output0_tm + 4, _sum1);
output0_tm += 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
const float* kptr = kernel0_tm.row(r);
int nn = inch * 4; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m128 _val0 = _mm_load_ps(r0);
__m128 _w0 = _mm_load1_ps(kptr);
_sum0 = _mm_comp_fmadd_ps(_w0, _val0, _sum0);
r0 += 4;
kptr += 1;
}
_mm_storeu_ps(output0_tm, _sum0);
output0_tm += 4;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel0_tm.row(r);
__m128 _sum0 = _mm_setzero_ps();
for (int q = 0; q < inch; q++)
{
__m128 _val0 = _mm_load_ps(r0);
__m128 _w0 = _mm_load_ps(kptr);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
r0 += 4;
kptr += 4;
}
// horizontal add of the 4 lanes gives the scalar output
float sum0 = _mm_reduce_add_ps(_sum0);
output0_tm[0] = sum0;
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float tmp[6][8];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 1;
const float* output0_tm_1 = output0_tm_0 + tiles * 1;
const float* output0_tm_2 = output0_tm_0 + tiles * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 7;
// TODO sse optimize
for (int m = 0; m < 8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 8;
output0_tm_1 += tiles * 8;
output0_tm_2 += tiles * 8;
output0_tm_3 += tiles * 8;
output0_tm_4 += tiles * 8;
output0_tm_5 += tiles * 8;
output0_tm_6 += tiles * 8;
output0_tm_7 += tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
// second pass: inverse-transform columns, add bias, write 6x6 block
for (int m = 0; m < 6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
wino_conv_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include "wino_conv_kernel_arm.h"
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))
#ifdef __aarch64__
#define PER_OUT_CHAN 16
void tran_inp_4(float*, float*, float*, int, int, int);
void wino_sgemm_4x16_A72(float* output, const float* input, const float* kernel, long cin, short stride_save);
void wino_sgemm_4x4_A72(float* output, const float* input, const float* kernel, long cin, short stride_save);
void wino_sgemm_1x16(float* output, const float* input, const float* kernel, long cin);
void wino_sgemm_1x4(float* output, const float* input, const float* kernel, long cin);
void tran_out_4(float*, float*, int, float*, float*, int);
#else
#define PER_OUT_CHAN 12
void wino_sgemm_4x12_A17(float* output, const float* input, const float* kernel, long cin);
void wino_sgemm_4x4_A17(float* output, const float* input, const float* kernel, long cin);
void wino_sgemm_1x12_A17(float* output, const float* input, const float* kernel, long cin);
// need to be optimized by neon
/* Scalar fallback GEMM: multiply one input vector (length cin) by a 4-wide
 * packed kernel block laid out as [cin][4]; writes 4 dot products to output.
 * Kept as plain C until a NEON version exists. */
static inline void wino_sgemm_1x4_cpu(float* output, const float* input, const float* kernel, long cin)
{
    for (int col = 0; col < 4; col++)
    {
        float acc = 0;
        for (int r = 0; r < cin; r++)
        {
            acc += input[r] * kernel[r * 4 + col];
        }
        output[col] = acc;
    }
}
#endif
/* Winograd F(4,3) kernel transform for one 3x3 kernel:
 * trans_ker(6x6) = G * ker(3x3) * G^T, with
 *   G  = | 1/4    0     0   |        G^T = G transposed.
 *        | -1/6  -1/6  -1/6 |
 *        | -1/6   1/6  -1/6 |
 *        | 1/24   1/12  1/6 |
 *        | 1/24  -1/12  1/6 |
 *        |  0     0     1   |
 * Common subexpressions of each G row are factored out; the two matrix
 * multiplies are done as two fused passes (left multiply, then right). */
static inline void trans_kernel_f43(float* ker, float* trans_ker)
{
    float mid[18] = {0};
    float neg_sum02_6[6];  /* -(row0 + row2) / 6 */
    float quarter02_6[6];  /* (row0/4 + row2) / 6 */
    float row1_6[6];       /* row1 / 6 */
    float row1_12[6];      /* row1 / 12 */
    float one_sixth = 1. / 6.f;
    /* left multiply: mid(6x3) = G * ker */
    for (int col = 0; col < 3; col++)
    {
        neg_sum02_6[col] = -(ker[col] + ker[6 + col]) * one_sixth;
        quarter02_6[col] = (ker[col] * 0.25 + ker[6 + col]) * one_sixth;
        row1_6[col] = ker[3 + col] * one_sixth;
        row1_12[col] = row1_6[col] * 0.5;
        mid[col] = ker[col] * 0.25;
        mid[3 + col] = -row1_6[col] + neg_sum02_6[col];
        mid[6 + col] = row1_6[col] + neg_sum02_6[col];
        mid[9 + col] = row1_12[col] + quarter02_6[col];
        mid[12 + col] = -row1_12[col] + quarter02_6[col];
        mid[15 + col] = ker[6 + col];
    }
    /* right multiply: trans_ker(6x6) = mid * G^T */
    for (int row = 0; row < 6; row++)
    {
        int s = row * 3;
        int d = row * 6;
        neg_sum02_6[row] = -(mid[s] + mid[s + 2]) * one_sixth;
        quarter02_6[row] = (mid[s] * 0.25 + mid[s + 2]) * one_sixth;
        row1_6[row] = mid[s + 1] * one_sixth;
        row1_12[row] = row1_6[row] * 0.5;
        trans_ker[d] = mid[s] * 0.25;
        trans_ker[d + 1] = -row1_6[row] + neg_sum02_6[row];
        trans_ker[d + 2] = row1_6[row] + neg_sum02_6[row];
        trans_ker[d + 3] = row1_12[row] + quarter02_6[row];
        trans_ker[d + 4] = -row1_12[row] + quarter02_6[row];
        trans_ker[d + 5] = mid[s + 2];
    }
}
/* Transform every 3x3 kernel of the filter tensor into its 6x6 Winograd
 * form. Kernels are stored contiguously as [out_c][in_c][3*3]; outputs are
 * written back-to-back, ELEM_SIZE floats per kernel, in the same order. */
static inline void transform_kernel_f43_tile(struct ir_tensor* filter, float* trans_ker)
{
    int total = filter->dims[0] * filter->dims[1]; /* out_c * in_c kernels */
    float* src = ( float* )filter->data;
    float* dst = trans_ker;
    for (int k = 0; k < total; k++)
    {
        trans_kernel_f43(src + 9 * k, dst);
        dst += ELEM_SIZE;
    }
}
// Repack the transformed kernels for the sgemm micro-kernels:
//   src ker0: [out_c][in_c][ELEM_SIZE]
//   dst ker1: [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN]
// Full PER_OUT_CHAN-wide channel groups come first, then leftover channels
// packed 4-wide, then 1-wide — mirroring the compute paths in wino_sgemm_set.
static inline void interleave_kernel(float* ker0, float* ker1, int out_c, int in_c)
{
    float* dst = ker1;
    int p = 0;
    // PER_OUT_CHAN-wide output-channel groups
    for (; p + PER_OUT_CHAN <= out_c; p += PER_OUT_CHAN)
    {
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int i = 0; i < in_c; i++)
            {
                for (int j = 0; j < PER_OUT_CHAN; j++)
                {
                    *dst++ = ker0[((p + j) * in_c + i) * ELEM_SIZE + s];
                }
            }
        }
    }
    // remaining groups of 4 output channels
    for (; p < (out_c & -4); p += 4)
    {
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int i = 0; i < in_c; i++)
            {
                for (int j = 0; j < 4; j++)
                {
                    *dst++ = ker0[((p + j) * in_c + i) * ELEM_SIZE + s];
                }
            }
        }
    }
    // leftover single output channels
    for (; p < out_c; p++)
    {
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int i = 0; i < in_c; i++)
            {
                *dst++ = ker0[(p * in_c + i) * ELEM_SIZE + s];
            }
        }
    }
}
/* Zero-pad each input channel into inp_padded: pad0 zero rows on top and
 * pad1 zero columns on the left; whatever remains of the padded extent
 * (padded_h/padded_w may exceed inh+pad0 / inw+pad1 to round up to the tile
 * grid) is zero-filled on the bottom/right. */
static inline void pad_input1(const float* input, float* inp_padded, int inc, int inh, int inw, int padded_h,
                              int padded_w, int pad0, int pad1)
{
    int bottom_rows = padded_h - pad0 - inh;
    int right_cols = padded_w - pad1 - inw;
    const float* src = input;
    for (int c = 0; c < inc; c++)
    {
        float* dst = inp_padded + c * padded_h * padded_w;
        /* top padding rows */
        memset(dst, 0, sizeof(float) * padded_w * pad0);
        dst += padded_w * pad0;
        /* interior rows: left zeros, image data, right zeros */
        for (int r = 0; r < inh; r++)
        {
            memset(dst, 0, sizeof(float) * pad1);
            memcpy(dst + pad1, src, sizeof(float) * inw);
            memset(dst + pad1 + inw, 0, sizeof(float) * right_cols);
            src += inw;
            dst += padded_w;
        }
        /* bottom padding rows */
        memset(dst, 0, sizeof(float) * padded_w * bottom_rows);
    }
}
/* Winograd F(4,3) input transform of a single 6x6 tile: writes the 36
 * values of B^T * d * B to inp_ptr. The tile's top-left corner is at row
 * ih*4, column jw*4 of channel c (tiles overlap by 2 because the output
 * tile is 4x4 for a 3x3 kernel). */
static inline void trans_inp_1tile(float* input, float* inp_ptr, int ih, int jw, int c, int in_hw, int inw)
{
    float* base = ( float* )input + c * in_hw + ih * 4 * inw + jw * 4;
    float* row0 = base;
    float* row1 = row0 + inw;
    float* row2 = row1 + inw;
    float* row3 = row2 + inw;
    float* row4 = row3 + inw;
    float* row5 = row4 + inw;
    float mid[36] = {0};
    float s_r1_add_r2[6];
    float s_r3_add_r4[6];
    float s_r1_sub_r2[6];
    float s_r3_sub_r4[6];
    float s_r4_sub_r2[6];
    float s_r1_sub_r3[6];
    /* column pass: mid = B^T * d */
    for (int k = 0; k < 6; k++)
    {
        s_r1_add_r2[k] = row1[k] + row2[k];
        s_r1_sub_r2[k] = row1[k] - row2[k];
        s_r3_add_r4[k] = row3[k] + row4[k];
        s_r3_sub_r4[k] = row3[k] - row4[k];
        s_r4_sub_r2[k] = row4[k] - row2[k];
        s_r1_sub_r3[k] = row1[k] - row3[k];
        mid[k] = 4 * row0[k] - 5 * row2[k] + row4[k];
        mid[6 + k] = s_r3_add_r4[k] - 4 * s_r1_add_r2[k];
        mid[12 + k] = 4 * s_r1_sub_r2[k] - s_r3_sub_r4[k];
        mid[18 + k] = s_r4_sub_r2[k] - 2 * s_r1_sub_r3[k];
        mid[24 + k] = s_r4_sub_r2[k] + 2 * s_r1_sub_r3[k];
        mid[30 + k] = 4 * row1[k] - 5 * row3[k] + row5[k];
    }
    /* row pass: out = mid * B */
    for (int k = 0; k < 6; k++)
    {
        int s = k * 6;
        float d4_sub_d2 = mid[s + 4] - mid[s + 2];
        float d1x4_sub_d3 = 4 * mid[s + 1] - mid[s + 3];
        float d4_sub_d2x4 = mid[s + 4] - 4 * mid[s + 2];
        float d1_sub_d3_x2 = 2 * (mid[s + 1] - mid[s + 3]);
        inp_ptr[s] = 4 * mid[s] - 5 * mid[s + 2] + mid[s + 4];
        inp_ptr[s + 1] = d4_sub_d2x4 - d1x4_sub_d3;
        inp_ptr[s + 2] = d4_sub_d2x4 + d1x4_sub_d3;
        inp_ptr[s + 3] = d4_sub_d2 - d1_sub_d3_x2;
        inp_ptr[s + 4] = d4_sub_d2 + d1_sub_d3_x2;
        inp_ptr[s + 5] = 4 * mid[s + 1] - 5 * mid[s + 3] + mid[s + 5];
    }
}
/* Winograd F(4,3) input transform for FOUR horizontally adjacent tiles at
 * once (plain-C + NEON path used when the aarch64 assembly helper
 * tran_inp_4 is unavailable). inp points at the top-left of the first
 * tile; the four tiles start at columns 0/4/8/12 of the same six input
 * rows (row stride inw). Results are scattered into inp_ptr with stride
 * s_size floats between consecutive Winograd elements, 4 consecutive
 * floats (one per tile) per element.
 * The scalar pass applies B^T down the columns; the NEON section applies
 * B across the rows, one 4-lane vector per tile group. The commented-out
 * loops are the scalar reference for the NEON code and are kept as
 * documentation. */
static inline void trans_inp_4_cpu(float* inp, float* inp_ptr, int inw, int s_size)
{
    float* inp0 = inp;
    float* inp1 = inp0 + inw;
    float* inp2 = inp1 + inw;
    float* inp3 = inp2 + inw;
    float* inp4 = inp3 + inw;
    float* inp5 = inp4 + inw;
    float mid[36 * 4] = {0};
    float r4_minus_r2[24];
    float r1_4_minus_r3[24];
    float r4_minus_4_r2[24];
    float r1_minus_r3_x2[24];
    /* column pass (scalar): one iteration per column k of the 6x6 tiles,
       unrolled over the 4 tiles (column offsets 0/4/8/12) */
    for (int i = 0; i < 6; i++)
    {
        // 0
        mid[i * 4] = 4 * inp0[i] - 5 * inp2[i] + inp4[i];
        mid[(30 + i) * 4] = 4 * inp1[i] - 5 * inp3[i] + inp5[i];
        r1_minus_r3_x2[i * 4 + 0] = (inp1[i] - inp3[i]) * 2;
        r1_4_minus_r3[i * 4 + 0] = 4 * inp1[i] - inp3[i];
        r4_minus_4_r2[i * 4 + 0] = inp4[i] - 4 * inp2[i];
        r4_minus_r2[i * 4 + 0] = inp4[i] - inp2[i];
        // 1
        mid[i * 4 + 1] = 4 * inp0[i + 4] - 5 * inp2[i + 4] + inp4[i + 4];
        mid[(30 + i) * 4 + 1] = 4 * inp1[i + 4] - 5 * inp3[i + 4] + inp5[i + 4];
        r1_minus_r3_x2[i * 4 + 1] = (inp1[i + 4] - inp3[i + 4]) * 2;
        r1_4_minus_r3[i * 4 + 1] = 4 * inp1[i + 4] - inp3[i + 4];
        r4_minus_4_r2[i * 4 + 1] = inp4[i + 4] - 4 * inp2[i + 4];
        r4_minus_r2[i * 4 + 1] = inp4[i + 4] - inp2[i + 4];
        // 2
        mid[i * 4 + 2] = 4 * inp0[i + 8] - 5 * inp2[i + 8] + inp4[i + 8];
        mid[(30 + i) * 4 + 2] = 4 * inp1[i + 8] - 5 * inp3[i + 8] + inp5[i + 8];
        r1_minus_r3_x2[i * 4 + 2] = (inp1[i + 8] - inp3[i + 8]) * 2;
        r1_4_minus_r3[i * 4 + 2] = 4 * inp1[i + 8] - inp3[i + 8];
        r4_minus_4_r2[i * 4 + 2] = inp4[i + 8] - 4 * inp2[i + 8];
        r4_minus_r2[i * 4 + 2] = inp4[i + 8] - inp2[i + 8];
        // 3
        mid[i * 4 + 3] = 4 * inp0[i + 12] - 5 * inp2[i + 12] + inp4[i + 12];
        mid[(30 + i) * 4 + 3] = 4 * inp1[i + 12] - 5 * inp3[i + 12] + inp5[i + 12];
        r1_minus_r3_x2[i * 4 + 3] = (inp1[i + 12] - inp3[i + 12]) * 2;
        r1_4_minus_r3[i * 4 + 3] = 4 * inp1[i + 12] - inp3[i + 12];
        r4_minus_4_r2[i * 4 + 3] = inp4[i + 12] - 4 * inp2[i + 12];
        r4_minus_r2[i * 4 + 3] = inp4[i + 12] - inp2[i + 12];
    }
    //====================================================================
    /* scalar reference for the NEON block below: */
    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         mid[(6 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         mid[(12 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         mid[(18 + i) * 4 + k] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         mid[(24 + i) * 4 + k] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //     }
    // }
    /* finish the column pass in registers: lineR_C holds mid row R, column
       group C (4 tiles per vector), without ever storing rows 1-4 of mid */
    float32x4_t r0 = vld1q_f32(r4_minus_4_r2);
    float32x4_t r1 = vld1q_f32(r4_minus_4_r2 + 4);
    float32x4_t r2 = vld1q_f32(r4_minus_4_r2 + 8);
    float32x4_t r3 = vld1q_f32(r4_minus_4_r2 + 12);
    float32x4_t r4 = vld1q_f32(r4_minus_4_r2 + 16);
    float32x4_t r5 = vld1q_f32(r4_minus_4_r2 + 20);
    float32x4_t r0_ = vld1q_f32(r1_4_minus_r3);
    float32x4_t r1_ = vld1q_f32(r1_4_minus_r3 + 4);
    float32x4_t r2_ = vld1q_f32(r1_4_minus_r3 + 8);
    float32x4_t r3_ = vld1q_f32(r1_4_minus_r3 + 12);
    float32x4_t r4_ = vld1q_f32(r1_4_minus_r3 + 16);
    float32x4_t r5_ = vld1q_f32(r1_4_minus_r3 + 20);
    float32x4_t line0_0 = vld1q_f32(mid);
    float32x4_t line0_1 = vld1q_f32(mid + 4);
    float32x4_t line0_2 = vld1q_f32(mid + 8);
    float32x4_t line0_3 = vld1q_f32(mid + 12);
    float32x4_t line0_4 = vld1q_f32(mid + 16);
    float32x4_t line0_5 = vld1q_f32(mid + 20);
    float32x4_t line1_0 = vsubq_f32(r0, r0_); // mid[(6 + i) * 4 + k]  [1][0]
    float32x4_t line1_1 = vsubq_f32(r1, r1_); // mid[(6 + i) * 4 + k]  [1][1]
    float32x4_t line1_2 = vsubq_f32(r2, r2_); // mid[(6 + i) * 4 + k]  [1][2]
    float32x4_t line1_3 = vsubq_f32(r3, r3_); // mid[(6 + i) * 4 + k]  [1][3]
    float32x4_t line1_4 = vsubq_f32(r4, r4_); // mid[(6 + i) * 4 + k]  [1][4]
    float32x4_t line1_5 = vsubq_f32(r5, r5_); // mid[(6 + i) * 4 + k]  [1][5]
    float32x4_t line2_0 = vaddq_f32(r0, r0_); // mid[(12 + i) * 4 + k] [2][0]
    float32x4_t line2_1 = vaddq_f32(r1, r1_); // mid[(12 + i) * 4 + k] [2][1]
    float32x4_t line2_2 = vaddq_f32(r2, r2_); // mid[(12 + i) * 4 + k] [2][2]
    float32x4_t line2_3 = vaddq_f32(r3, r3_); // mid[(12 + i) * 4 + k] [2][3]
    float32x4_t line2_4 = vaddq_f32(r4, r4_); // mid[(12 + i) * 4 + k] [2][4]
    float32x4_t line2_5 = vaddq_f32(r5, r5_); // mid[(12 + i) * 4 + k] [2][5]
    r0 = vld1q_f32(r4_minus_r2);
    r1 = vld1q_f32(r4_minus_r2 + 4);
    r2 = vld1q_f32(r4_minus_r2 + 8);
    r3 = vld1q_f32(r4_minus_r2 + 12);
    r4 = vld1q_f32(r4_minus_r2 + 16);
    r5 = vld1q_f32(r4_minus_r2 + 20);
    r0_ = vld1q_f32(r1_minus_r3_x2);
    r1_ = vld1q_f32(r1_minus_r3_x2 + 4);
    r2_ = vld1q_f32(r1_minus_r3_x2 + 8);
    r3_ = vld1q_f32(r1_minus_r3_x2 + 12);
    r4_ = vld1q_f32(r1_minus_r3_x2 + 16);
    r5_ = vld1q_f32(r1_minus_r3_x2 + 20);
    float32x4_t line5_0 = vld1q_f32(mid + 120);
    float32x4_t line5_1 = vld1q_f32(mid + 124);
    float32x4_t line5_2 = vld1q_f32(mid + 128);
    float32x4_t line5_3 = vld1q_f32(mid + 132);
    float32x4_t line5_4 = vld1q_f32(mid + 136);
    float32x4_t line5_5 = vld1q_f32(mid + 140);
    float32x4_t line3_0 = vsubq_f32(r0, r0_); // mid[(18 + i) * 4 + k] [3][0]
    float32x4_t line3_1 = vsubq_f32(r1, r1_); // mid[(18 + i) * 4 + k] [3][1]
    float32x4_t line3_2 = vsubq_f32(r2, r2_); // mid[(18 + i) * 4 + k] [3][2]
    float32x4_t line3_3 = vsubq_f32(r3, r3_); // mid[(18 + i) * 4 + k] [3][3]
    float32x4_t line3_4 = vsubq_f32(r4, r4_); // mid[(18 + i) * 4 + k] [3][4]
    float32x4_t line3_5 = vsubq_f32(r5, r5_); // mid[(18 + i) * 4 + k] [3][5]
    float32x4_t line4_0 = vaddq_f32(r0, r0_); // mid[(24 + i) * 4 + k] [4][0]
    float32x4_t line4_1 = vaddq_f32(r1, r1_); // mid[(24 + i) * 4 + k] [4][1]
    float32x4_t line4_2 = vaddq_f32(r2, r2_); // mid[(24 + i) * 4 + k] [4][2]
    float32x4_t line4_3 = vaddq_f32(r3, r3_); // mid[(24 + i) * 4 + k] [4][3]
    float32x4_t line4_4 = vaddq_f32(r4, r4_); // mid[(24 + i) * 4 + k] [4][4]
    float32x4_t line4_5 = vaddq_f32(r5, r5_); // mid[(24 + i) * 4 + k] [4][5]
    /* row pass (out = mid * B), fused with the interleaved store */
    // r4_minus_r2[i * 4 + k]   i=0  = mid[0][4]
    r0 = vsubq_f32(line0_4, line0_2);
    r1 = vsubq_f32(line1_4, line1_2);
    r2 = vsubq_f32(line2_4, line2_2);
    r3 = vsubq_f32(line3_4, line3_2);
    r4 = vsubq_f32(line4_4, line4_2);
    r5 = vsubq_f32(line5_4, line5_2);
    r0_ = vsubq_f32(line0_1, line0_3);
    r1_ = vsubq_f32(line1_1, line1_3);
    r2_ = vsubq_f32(line2_1, line2_3);
    r3_ = vsubq_f32(line3_1, line3_3);
    r4_ = vsubq_f32(line4_1, line4_3);
    r5_ = vsubq_f32(line5_1, line5_3);
    float32x4_t const2 = vdupq_n_f32(2.f);
    r0_ = vmulq_f32(r0_, const2);
    r1_ = vmulq_f32(r1_, const2);
    r2_ = vmulq_f32(r2_, const2);
    r3_ = vmulq_f32(r3_, const2);
    r4_ = vmulq_f32(r4_, const2);
    r5_ = vmulq_f32(r5_, const2);
    vst1q_f32(inp_ptr + s_size * 3, vsubq_f32(r0, r0_));      // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 9, vsubq_f32(r1, r1_));      // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 15, vsubq_f32(r2, r2_));     // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 21, vsubq_f32(r3, r3_));     // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 27, vsubq_f32(r4, r4_));     // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 33, vsubq_f32(r5, r5_));     // inp_ptr[ s_size * (3 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 4, vaddq_f32(r0, r0_));      // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 10, vaddq_f32(r1, r1_));     // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 16, vaddq_f32(r2, r2_));     // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 22, vaddq_f32(r3, r3_));     // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 28, vaddq_f32(r4, r4_));     // inp_ptr[ s_size * (4 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 34, vaddq_f32(r5, r5_));     // inp_ptr[ s_size * (4 + i * 6)]
    float32x4_t const4 = vdupq_n_f32(4.f);
    float32x4_t const5 = vdupq_n_f32(-5.f);
    r0_ = vmulq_f32(line0_1, const4);    // line 1*4  ========
    r1_ = vmulq_f32(line1_1, const4);
    r2_ = vmulq_f32(line2_1, const4);
    r3_ = vmulq_f32(line3_1, const4);
    r4_ = vmulq_f32(line4_1, const4);
    r5_ = vmulq_f32(line5_1, const4);
    float32x4_t rr0_ = vsubq_f32(r0_, line0_3);    // line1*4-line3
    float32x4_t rr1_ = vsubq_f32(r1_, line1_3);
    float32x4_t rr2_ = vsubq_f32(r2_, line2_3);
    float32x4_t rr3_ = vsubq_f32(r3_, line3_3);
    float32x4_t rr4_ = vsubq_f32(r4_, line4_3);
    float32x4_t rr5_ = vsubq_f32(r5_, line5_3);
    r0 = vmulq_f32(line0_2, const4);
    r1 = vmulq_f32(line1_2, const4);
    r2 = vmulq_f32(line2_2, const4);
    r3 = vmulq_f32(line3_2, const4);
    r4 = vmulq_f32(line4_2, const4);
    r5 = vmulq_f32(line5_2, const4);
    r0 = vsubq_f32(line0_4, r0);    // line4 -4*line2
    r1 = vsubq_f32(line1_4, r1);
    r2 = vsubq_f32(line2_4, r2);
    r3 = vsubq_f32(line3_4, r3);
    r4 = vsubq_f32(line4_4, r4);
    r5 = vsubq_f32(line5_4, r5);
    vst1q_f32(inp_ptr + s_size * 1, vsubq_f32(r0, rr0_));     // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 7, vsubq_f32(r1, rr1_));     // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 13, vsubq_f32(r2, rr2_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 19, vsubq_f32(r3, rr3_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 25, vsubq_f32(r4, rr4_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 31, vsubq_f32(r5, rr5_));    // inp_ptr[ s_size * (1 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 2, vaddq_f32(r0, rr0_));     // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 8, vaddq_f32(r1, rr1_));     // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 14, vaddq_f32(r2, rr2_));    // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 20, vaddq_f32(r3, rr3_));    // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 26, vaddq_f32(r4, rr4_));    // inp_ptr[ s_size * (2 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 32, vaddq_f32(r5, rr5_));    // inp_ptr[ s_size * (2 + i * 6)]
    r0_ = vaddq_f32(line0_5, r0_);    // 5 + 1*4
    r1_ = vaddq_f32(line1_5, r1_);
    r2_ = vaddq_f32(line2_5, r2_);
    r3_ = vaddq_f32(line3_5, r3_);
    r4_ = vaddq_f32(line4_5, r4_);
    r5_ = vaddq_f32(line5_5, r5_);
    r0 = vmulq_f32(line0_3, const5);
    r1 = vmulq_f32(line1_3, const5);
    r2 = vmulq_f32(line2_3, const5);
    r3 = vmulq_f32(line3_3, const5);
    r4 = vmulq_f32(line4_3, const5);
    r5 = vmulq_f32(line5_3, const5);
    vst1q_f32(inp_ptr + s_size * 5, vaddq_f32(r0, r0_));      // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 11, vaddq_f32(r1, r1_));     // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 17, vaddq_f32(r2, r2_));     // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 23, vaddq_f32(r3, r3_));     // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 29, vaddq_f32(r4, r4_));     // inp_ptr[ s_size * (5 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 35, vaddq_f32(r5, r5_));     // inp_ptr[ s_size * (5 + i * 6)]
    r0 = vmulq_f32(line0_0, const4);
    r1 = vmulq_f32(line1_0, const4);
    r2 = vmulq_f32(line2_0, const4);
    r3 = vmulq_f32(line3_0, const4);
    r4 = vmulq_f32(line4_0, const4);
    r5 = vmulq_f32(line5_0, const4);
    r0_ = vmulq_f32(line0_2, const5);
    r1_ = vmulq_f32(line1_2, const5);
    r2_ = vmulq_f32(line2_2, const5);
    r3_ = vmulq_f32(line3_2, const5);
    r4_ = vmulq_f32(line4_2, const5);
    r5_ = vmulq_f32(line5_2, const5);
    r0 = vaddq_f32(r0, line0_4);
    r1 = vaddq_f32(r1, line1_4);
    r2 = vaddq_f32(r2, line2_4);
    r3 = vaddq_f32(r3, line3_4);
    r4 = vaddq_f32(r4, line4_4);
    r5 = vaddq_f32(r5, line5_4);
    vst1q_f32(inp_ptr + s_size * 0, vaddq_f32(r0, r0_));      // inp_ptr[ s_size * (0 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 6, vaddq_f32(r1, r1_));      // inp_ptr[ s_size * (0 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 12, vaddq_f32(r2, r2_));     // inp_ptr[ s_size * (0 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 18, vaddq_f32(r3, r3_));     // inp_ptr[ s_size * (0 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 24, vaddq_f32(r4, r4_));     // inp_ptr[ s_size * (0 + i * 6)]
    vst1q_f32(inp_ptr + s_size * 30, vaddq_f32(r5, r5_));     // inp_ptr[ s_size * (0 + i * 6)]
    /* scalar reference for the stores above: */
    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         r4_minus_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - mid[(i * 6 + 2) * 4 + k];
    //         r1_4_minus_r3[i * 4 + k] = 4 * mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k];
    //         r4_minus_4_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - 4 * mid[(i * 6 + 2) * 4 + k];
    //         r1_minus_r3_x2[i * 4 + k] = 2 * (mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k]);
    //     }
    // }
    // for(int i = 1; i < 2; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         inp_ptr[k + s_size * (i * 6)] =
    //             4 * mid[(i * 6) * 4 + k] - 5 * mid[(i * 6 + 2) * 4 + k] + mid[(i * 6 + 4) * 4 + k];
    //         inp_ptr[k + s_size * (1 + i * 6)] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         inp_ptr[k + s_size * (2 + i * 6)] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         inp_ptr[k + s_size * (3 + i * 6)] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         inp_ptr[k + s_size * (4 + i * 6)] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //         inp_ptr[k + s_size * (5 + i * 6)] =
    //             4 * mid[(i * 6 + 1) * 4 + k] - 5 * mid[(i * 6 + 3) * 4 + k] + mid[(i * 6 + 5) * 4 + k];
    //     }
    // }
}
// trans_input layout: [block_hw/4][ELEM_SIZE][inc][4]
/* Winograd input transform for all full groups of 4 tiles (block_hw / 4
 * groups). Tiles are numbered row-major over the block_h x block_w grid.
 * When the four tiles of a group lie on the same tile row, the fast path
 * transforms them directly into the interleaved layout; otherwise each
 * tile is transformed separately and interleaved afterwards. */
static inline void tran_input_4block(const float* input, float* trans_inp, int inc, int block_h, int block_w, int inh,
                                     int inw)
{
    int in_hw = inh * inw;
    int block_hw = block_h * block_w;
    int nn_block = block_hw >> 2;    // number of complete 4-tile groups
    int idxh[4];                     // tile-row of each tile in the group
    int idxw[4];                     // tile-column of each tile in the group
    for (int ib = 0; ib < nn_block; ib++)
    {
        float* inp_ptr_4tile = trans_inp + ib * 4 * ELEM_SIZE * inc;
        idxh[0] = (ib * 4) / block_w;
        idxh[1] = (ib * 4 + 1) / block_w;
        idxh[2] = (ib * 4 + 2) / block_w;
        idxh[3] = (ib * 4 + 3) / block_w;
        idxw[0] = (ib * 4) % block_w;
        idxw[1] = (ib * 4 + 1) % block_w;
        idxw[2] = (ib * 4 + 2) % block_w;
        idxw[3] = (ib * 4 + 3) % block_w;
        if (idxh[0] == idxh[3])
        {
            // fast path: 4 horizontally adjacent tiles, transformed in one go
            float* temp_inp_ptr = ( float* )(input + idxh[0] * 4 * inw + idxw[0] * 4);
            for (int c = 0; c < inc; c++)
            {
#ifdef __aarch64__
                // constant table consumed by the assembly helper —
                // presumably the B^T coefficients {1,2,4,5}; see tran_inp_4 in
                // the .S file (TODO confirm layout)
                float ker00[4] = {1, 2, 4, 5};
                tran_inp_4(temp_inp_ptr, inp_ptr_4tile + 4 * c, ker00, inw, inc * 16, in_hw);
                temp_inp_ptr += in_hw;
#else
                trans_inp_4_cpu(temp_inp_ptr, inp_ptr_4tile + c * 4, inw, inc * 4);
                temp_inp_ptr += in_hw;
#endif
            }
        }
        else
        {
            // slow path: group spans a row boundary — transform tile by tile
            // NOTE(review): VLA — inc * ELEM_SIZE * 4 floats on the stack;
            // large inc means a large stack frame
            float buffer0[inc * ELEM_SIZE * 4];
            float* buffer = buffer0;
            for (int c = 0; c < inc; c++)
            {
                trans_inp_1tile(( float* )input, buffer, idxh[0], idxw[0], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[1], idxw[1], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[2], idxw[2], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile(( float* )input, buffer, idxh[3], idxw[3], c, in_hw, inw);
                buffer += ELEM_SIZE;
            }
            // interleave [inc][4][ELEM_SIZE] -> [ELEM_SIZE][inc][4]
            float* tmp_inp = inp_ptr_4tile;
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                for (int i = 0; i < inc; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        *tmp_inp = buffer0[i * ELEM_SIZE * 4 + j * ELEM_SIZE + s];
                        tmp_inp++;
                    }
                }
            }
            // end interleave
        }
    }
}
/* Transform the leftover tiles (block_hw % 4 of them, starting at
 * resi_block) one at a time and interleave them as [ELEM_SIZE][inc]
 * right after the 4-tile groups written by tran_input_4block. */
static inline void tran_input_resi_block(const float* input, float* trans_inp, int inc, int nn_block, int resi_block,
                                         int block_hw, int block_w, int in_hw, int inw)
{
    float* dst = trans_inp + nn_block * 4 * ELEM_SIZE * inc;
    for (int ib = resi_block; ib < block_hw; ib++)
    {
        int ih = ib / block_w;
        int jw = ib % block_w;
        float scratch[ELEM_SIZE * inc];
        for (int c = 0; c < inc; c++)
        {
            trans_inp_1tile(( float* )input, scratch + c * ELEM_SIZE, ih, jw, c, in_hw, inw);
        }
        /* interleave [inc][ELEM_SIZE] -> [ELEM_SIZE][inc] */
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int c = 0; c < inc; c++)
            {
                *dst++ = scratch[c * ELEM_SIZE + s];
            }
        }
    }
}
/* Apply the layer activation: activation < 0 is identity, activation >= 0
 * clamps below at 0 (ReLU), and activation == 6 additionally clamps above
 * at 6 (ReLU6) — the project's integer activation convention. */
static inline float do_activation(float value, int activation)
{
    if (activation >= 0)
        value = (value > 0) ? value : 0;
    if (activation == 6)
        value = (value < 6) ? value : 6;
    return value;
}
/* Winograd F(4,3) output transform of one interior tile: computes the 4x4
 * block AT * mid(6x6) * A, adds the per-channel bias (if any), applies the
 * activation, and writes directly into the output image (row stride outw).
 *
 *   AT = | 1  1  1  1  1  0 |      A = AT^T
 *        | 0  1 -1  2 -2  0 |
 *        | 0  1  1  4  4  0 |
 *        | 0  1 -1  8 -8  1 |
 */
static inline void trans_output_f43(const float* mid, float* out, int outw, const float* bias_ptr, int activation)
{
    float rowbuf[24] = {0};
    float m1_add_m2[6];
    float m1_sub_m2[6];
    float m3_add_m4[6];
    float m3_sub_m4_x2[6];
    /* column pass: rowbuf(4x6) = AT * mid */
    for (int k = 0; k < 6; k++)
    {
        m1_add_m2[k] = mid[6 + k] + mid[12 + k];
        m1_sub_m2[k] = mid[6 + k] - mid[12 + k];
        m3_add_m4[k] = mid[18 + k] + mid[24 + k];
        m3_sub_m4_x2[k] = (mid[18 + k] - mid[24 + k]) * 2;
        rowbuf[k] = mid[k] + m1_add_m2[k] + m3_add_m4[k];
        rowbuf[6 + k] = m1_sub_m2[k] + m3_sub_m4_x2[k];
        rowbuf[12 + k] = m1_add_m2[k] + 4 * m3_add_m4[k];
        rowbuf[18 + k] = m1_sub_m2[k] + 4 * m3_sub_m4_x2[k] + mid[30 + k];
    }
    /* row pass: out(4x4) = rowbuf * A, fused with bias and activation */
    float c1_add_c2[4];
    float c1_sub_c2[4];
    float c3_add_c4[4];
    float c3_sub_c4_x2[4];
    for (int r = 0; r < 4; r++)
    {
        int s = 6 * r;
        c1_add_c2[r] = rowbuf[s + 1] + rowbuf[s + 2];
        c1_sub_c2[r] = rowbuf[s + 1] - rowbuf[s + 2];
        c3_add_c4[r] = rowbuf[s + 3] + rowbuf[s + 4];
        c3_sub_c4_x2[r] = (rowbuf[s + 3] - rowbuf[s + 4]) * 2;
    }
    if (bias_ptr)
    {
        float b = bias_ptr[0];
        for (int r = 0; r < 4; r++)
        {
            float* o = out + r * outw;
            o[0] = do_activation(rowbuf[r * 6] + c1_add_c2[r] + c3_add_c4[r] + b, activation);
            o[1] = do_activation(c1_sub_c2[r] + c3_sub_c4_x2[r] + b, activation);
            o[2] = do_activation(c1_add_c2[r] + 4 * c3_add_c4[r] + b, activation);
            o[3] = do_activation(c1_sub_c2[r] + 4 * c3_sub_c4_x2[r] + rowbuf[r * 6 + 5] + b, activation);
        }
    }
    else
    {
        for (int r = 0; r < 4; r++)
        {
            float* o = out + r * outw;
            o[0] = do_activation(rowbuf[r * 6] + c1_add_c2[r] + c3_add_c4[r], activation);
            o[1] = do_activation(c1_sub_c2[r] + c3_sub_c4_x2[r], activation);
            o[2] = do_activation(c1_add_c2[r] + 4 * c3_add_c4[r], activation);
            o[3] = do_activation(c1_sub_c2[r] + 4 * c3_sub_c4_x2[r] + rowbuf[r * 6 + 5], activation);
        }
    }
}
/* Winograd F(4,3) output transform into a dense 4x4 buffer (row stride 4):
 * out = AT * mid(6x6) * A (+ bias). Used for border tiles, whose valid
 * region is copied out afterwards — so no activation is applied here.
 *
 *   AT = | 1  1  1  1  1  0 |      A = AT^T
 *        | 0  1 -1  2 -2  0 |
 *        | 0  1  1  4  4  0 |
 *        | 0  1 -1  8 -8  1 |
 */
static inline void trans_output_f43_ordinary(const float* mid, float* out, const float* bias_ptr)
{
    float rowbuf[24] = {0};
    float m1_add_m2[6];
    float m1_sub_m2[6];
    float m3_add_m4[6];
    float m3_sub_m4_x2[6];
    /* column pass: rowbuf(4x6) = AT * mid */
    for (int k = 0; k < 6; k++)
    {
        m1_add_m2[k] = mid[6 + k] + mid[12 + k];
        m1_sub_m2[k] = mid[6 + k] - mid[12 + k];
        m3_add_m4[k] = mid[18 + k] + mid[24 + k];
        m3_sub_m4_x2[k] = (mid[18 + k] - mid[24 + k]) * 2;
        rowbuf[k] = mid[k] + m1_add_m2[k] + m3_add_m4[k];
        rowbuf[6 + k] = m1_sub_m2[k] + m3_sub_m4_x2[k];
        rowbuf[12 + k] = m1_add_m2[k] + 4 * m3_add_m4[k];
        rowbuf[18 + k] = m1_sub_m2[k] + 4 * m3_sub_m4_x2[k] + mid[30 + k];
    }
    /* row pass: one 4-wide output row per iteration */
    for (int r = 0; r < 4; r++)
    {
        int s = 6 * r;
        float c1_add_c2 = rowbuf[s + 1] + rowbuf[s + 2];
        float c1_sub_c2 = rowbuf[s + 1] - rowbuf[s + 2];
        float c3_add_c4 = rowbuf[s + 3] + rowbuf[s + 4];
        float c3_sub_c4_x2 = (rowbuf[s + 3] - rowbuf[s + 4]) * 2;
        if (bias_ptr)
        {
            float b = bias_ptr[0];
            out[4 * r] = b + rowbuf[s] + c1_add_c2 + c3_add_c4;
            out[4 * r + 1] = b + c1_sub_c2 + c3_sub_c4_x2;
            out[4 * r + 2] = b + c1_add_c2 + 4 * c3_add_c4;
            out[4 * r + 3] = b + c1_sub_c2 + 4 * c3_sub_c4_x2 + rowbuf[s + 5];
        }
        else
        {
            out[4 * r] = rowbuf[s] + c1_add_c2 + c3_add_c4;
            out[4 * r + 1] = c1_sub_c2 + c3_sub_c4_x2;
            out[4 * r + 2] = c1_add_c2 + 4 * c3_add_c4;
            out[4 * r + 3] = c1_sub_c2 + 4 * c3_sub_c4_x2 + rowbuf[s + 5];
        }
    }
}
/* Inverse-transform one Winograd tile for KER_COUT_UNIT_ consecutive
 * output channels. buffer_ptr holds ELEM_SIZE floats per channel,
 * back-to-back. Interior tiles are written straight to the output image;
 * border tiles go through a dense 4x4 scratch buffer and only the valid
 * ret_h x ret_w region is copied (with activation applied there). */
static inline void transform_output_f43_1tile(const float* buffer_ptr, float* out, int p_idx, int idx_blockhw,
                                              int block_h, int block_w, int out_hw, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    float edge_buf[TILE * TILE];
    int row = idx_blockhw / block_w;
    int col = idx_blockhw % block_w;
    /* a tile is interior when its TILE x TILE footprint lies fully inside the image */
    int interior = (resi_h == 0 && resi_w == 0) || (resi_h == 0 && (col < block_w - 1)) ||
                   (resi_w == 0 && (row < block_h - 1)) || ((col < block_w - 1) && (row < block_h - 1));
    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        const float* bias_ptr = bias ? bias + cout_idx : NULL;
        float* dst = out + cout_idx * out_hw + row * TILE * outw + col * TILE;
        if (interior)
        {
            trans_output_f43(buffer_ptr, dst, outw, bias_ptr, activation);
        }
        else
        {
            int valid_h = (row < block_h - 1) ? TILE : TILE - resi_h;
            int valid_w = (col < block_w - 1) ? TILE : TILE - resi_w;
            trans_output_f43_ordinary(buffer_ptr, edge_buf, bias_ptr);
            for (int hh = 0; hh < valid_h; hh++)
            {
                for (int ww = 0; ww < valid_w; ww++)
                {
                    dst[hh * outw + ww] = do_activation(edge_buf[hh * TILE + ww], activation);
                }
            }
        }
        buffer_ptr += ELEM_SIZE;
    }
}
/* Inverse-transform a group of FOUR consecutive Winograd tiles (row-major
 * tile indices block_idx .. block_idx+3) for KER_COUT_UNIT_ consecutive
 * output channels starting at p_idx.
 * buffer_ptr: KER_COUT_UNIT_ * 4 * ELEM_SIZE floats, channel-major then
 *             tile-major (advanced by ELEM_SIZE per tile).
 * Interior tiles are written straight into the output image; border tiles
 * (last tile row/column when resi_h/resi_w != 0) go through a dense
 * TILE x TILE scratch buffer and only the valid ret_h x ret_w region is
 * copied, with the activation applied during the copy. */
static inline void transform_output_f43_4tile(float* buffer_ptr, float* out, int p_idx, int block_idx, int block_h,
                                              int block_w, int outh, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    int out_hw = outh * outw;
    float tmp_buffer[TILE * TILE];
    int idx_h[4];    /* tile-row of each of the 4 tiles */
    int idx_w[4];    /* tile-column of each of the 4 tiles */
    idx_h[0] = (block_idx) / block_w;
    idx_h[1] = (block_idx + 1) / block_w;
    idx_h[2] = (block_idx + 2) / block_w;
    idx_h[3] = (block_idx + 3) / block_w;
    idx_w[0] = (block_idx) % block_w;
    idx_w[1] = (block_idx + 1) % block_w;
    idx_w[2] = (block_idx + 2) % block_w;
    idx_w[3] = (block_idx + 3) % block_w;
    float* bias_ptr = NULL;
    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        float* out_ptr = out + cout_idx * out_hw;
        if (bias)
        {
            bias_ptr = ( float* )bias + cout_idx;
        }
        for (int ii = 0; ii < 4; ii++)
        {
            int i_h = idx_h[ii];
            int j_w = idx_w[ii];
            /* interior tile: full TILE x TILE footprint inside the image */
            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
            {
                trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation);
            }    // direct use_out_ptr
            else
            {
                int ret_h = TILE - resi_h;
                if (i_h < block_h - 1)
                    ret_h = TILE;
                int ret_w = TILE - resi_w;
                if (j_w < block_w - 1)
                    ret_w = TILE;
                // border tile: transform into tmp_buffer, then copy the valid region
                trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr);
                float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE);
                for (int hh = 0; hh < ret_h; hh++)
                {
                    for (int ww = 0; ww < ret_w; ww++)
                    {
                        /* TILE (not literal 4) — tmp_buffer row stride; matches
                           transform_output_f43_1tile */
                        out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * TILE + ww], activation);
                    }
                }
            }    // end else, tmp_buff
            buffer_ptr += ELEM_SIZE;
        }
    }
}
// trans_input layout: [block_hw/4][ELEM_SIZE][inc][4]
// kernel layout: [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN]
/*
 * GEMM + inverse output transform stage of the Winograd F(4,3) convolution,
 * covering the output channels that form complete PER_OUT_CHAN groups
 * (p in [0, cout_end & -PER_OUT_CHAN)).  Input tiles are consumed four at a
 * time where possible, with a scalar tail for the remaining tiles.
 *
 * ker        - interleaved transformed kernel (layout above)
 * inp        - transformed input tiles (layout above)
 * output     - NCHW output feature map (out_c x out_h x out_w per batch)
 * bias       - per-output-channel bias, may be NULL
 * resi_h/w   - rows/cols of the last tile row/column that fall outside the
 *              real output (block_h*TILE - out_h, block_w*TILE - out_w)
 * activation - activation code forwarded to the output-transform helpers
 * cpu_affinity is accepted but not used in this function.
 */
static void wino_sgemm_set(const float* ker, const float* inp, float* output, const float* bias, int cin,
                           int cout_end, int block_h, int block_w, int out_h, int out_w, int resi_h,
                           int resi_w, int activation, int num_thread, int cpu_affinity)
{
    // Wide outputs allow the fused 4-tile store path (needs room for 4 tiles
    // in one row); narrow outputs always go through the generic path.
    int flag_outw = 1;
    if (out_w < 16)
        flag_outw = 0;
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < (cout_end & -PER_OUT_CHAN); p += PER_OUT_CHAN)
    {
        int out_hw = out_w * out_h;
        int block_hw = block_h * block_w;
        const float* ker_ptr = ker + p * ELEM_SIZE * cin;
        int i = 0;
        // Main loop: 4 input tiles per iteration.
        for (; i < (block_hw & -4); i += 4)
        {
            const float* inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[PER_OUT_CHAN * 4 * ELEM_SIZE];
#ifdef __aarch64__
            // Tile coordinates (row, col) of the 4 tiles in the block grid.
            int idx_h[4];
            int idx_w[4];
            idx_h[0] = (i) / block_w;
            idx_h[1] = (i + 1) / block_w;
            idx_h[2] = (i + 2) / block_w;
            idx_h[3] = (i + 3) / block_w;
            idx_w[0] = (i) % block_w;
            idx_w[1] = (i + 1) % block_w;
            idx_w[2] = (i + 2) % block_w;
            idx_w[3] = (i + 3) % block_w;
            int wino_out_4_tiles = 0;
            int mulitplier = PER_OUT_CHAN; // NOTE(review): "mulitplier" is a typo for "multiplier" kept for history
            if (flag_outw)
            {
                // Fast path only when all 4 tiles sit on one row and none of
                // them touches the (possibly partial) last row/column.
                if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1)))
                {
                    wino_out_4_tiles = 1;
                    mulitplier = 1;
                }
            }
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                wino_sgemm_4x16_A72(out_buffer + s * 4 * mulitplier, inp_ptr + s * 4 * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin, wino_out_4_tiles);
            }
            if (wino_out_4_tiles == 1)
            {
                // Fused path: transform + store 4 adjacent tiles at once.
                float* bias_ptr = NULL;
                for (int pss = 0; pss < PER_OUT_CHAN; pss++)
                {
                    int cout_idx = p + pss;
                    float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE;
                    if (bias)
                    {
                        bias_ptr = ( float* )(bias + cout_idx);
                    }
                    // Constants used by the assembler output transform.
                    float ker00[4] = {2, 4, 8, 0};
                    tran_out_4(out_buffer + pss * ELEM_SIZE * 4, out_ptr, out_w * sizeof(float), ker00, bias_ptr, activation);
                }
            }
            else
            {
                // Generic path: de-interleave GEMM results to [cout][tile][ELEM_SIZE].
                float buffer[PER_OUT_CHAN * 4 * ELEM_SIZE];
                float* buffer_ptr0 = buffer;
                for (int pp = 0; pp < PER_OUT_CHAN; pp++)
                {
                    for (int t = 0; t < 4; t++)
                    {
                        for (int ss = 0; ss < ELEM_SIZE; ss++)
                        {
                            *buffer_ptr0 = out_buffer[ss * 4 * PER_OUT_CHAN + pp * 4 + t];
                            buffer_ptr0++;
                        }
                    }
                }
                // end interleave
                {
                    float tmp_buffer[TILE * TILE];
                    const float* bias_ptr = NULL;
                    for (int pss = 0; pss < PER_OUT_CHAN; pss++)
                    {
                        int cout_idx = p + pss;
                        float* out_ptr = output + cout_idx * out_hw;
                        if (bias)
                        {
                            bias_ptr = bias + cout_idx;
                        }
                        for (int ii = 0; ii < 4; ii++)
                        {
                            int i_h = idx_h[ii];
                            int j_w = idx_w[ii];
                            // Interior tiles can be transformed directly into
                            // the output; border tiles go through tmp_buffer
                            // so the out-of-range rows/cols are dropped.
                            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                                (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
                            {
                                trans_output_f43(buffer + ii * ELEM_SIZE + pss * 36 * 4, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w, (const float*)bias_ptr, activation);
                            } // direct use_out_ptr
                            else
                            {
                                int ret_h = TILE - resi_h;
                                if (i_h < block_h - 1)
                                    ret_h = TILE;
                                int ret_w = TILE - resi_w;
                                if (j_w < block_w - 1)
                                    ret_w = TILE;
                                // tmp_buffer
                                trans_output_f43_ordinary(buffer + ii * ELEM_SIZE + pss * 36 * 4, tmp_buffer, (const float*)bias_ptr);
                                float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);
                                for (int hh = 0; hh < ret_h; hh++)
                                {
                                    for (int ww = 0; ww < ret_w; ww++)
                                    {
                                        out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                                    }
                                }
                            } // end else, tmp_buff
                        }
                    }
                }
                // end transform
            }
#else
            // Non-aarch64: 4x12 micro-kernel, then shared 4-tile output transform.
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                wino_sgemm_4x12_A17(out_buffer + s * 4 * PER_OUT_CHAN, inp_ptr + s * 4 * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin);
            }
            float buffer[PER_OUT_CHAN * 4 * ELEM_SIZE];
            float* buffer_ptr0 = buffer;
            for (int pp = 0; pp < PER_OUT_CHAN; pp++)
            {
                for (int t = 0; t < 4; t++)
                {
                    for (int ss = 0; ss < ELEM_SIZE; ss++)
                    {
                        *buffer_ptr0 = out_buffer[ss * 4 * PER_OUT_CHAN + pp * 4 + t];
                        buffer_ptr0++;
                    }
                }
            }
            transform_output_f43_4tile(buffer, output, p, i, block_h, block_w, out_h, out_w, resi_h, resi_w, PER_OUT_CHAN, bias, activation);
#endif
        }
        // Tail loop: one input tile per iteration.
        for (; i < block_hw; i++)
        {
            const float* inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[PER_OUT_CHAN * ELEM_SIZE];
            for (int s = 0; s < ELEM_SIZE; s++)
            {
#ifdef __aarch64__
                wino_sgemm_1x16(out_buffer + s * PER_OUT_CHAN, inp_ptr + s * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin);
#else
                wino_sgemm_1x12_A17(out_buffer + s * PER_OUT_CHAN, inp_ptr + s * cin, ker_ptr + s * PER_OUT_CHAN * cin, cin);
#endif
            }
            // interleave
            float buffer[PER_OUT_CHAN * ELEM_SIZE];
            float* buffer_ptr0 = buffer;
            for (int pp = 0; pp < PER_OUT_CHAN; pp++)
            {
                for (int ss = 0; ss < ELEM_SIZE; ss++)
                {
                    *buffer_ptr0 = out_buffer[ss * PER_OUT_CHAN + pp];
                    buffer_ptr0++;
                }
            }
            // end interleave
            transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h, resi_w, PER_OUT_CHAN, bias, activation);
            // end transform
        }
    }
}
/*
 * Winograd F(4,3) GEMM + output transform for the leftover output channels,
 * processed in groups of 4 (p in [cout_start & -4, cout_end & -4)) with a
 * final scalar loop for channels past the last multiple of 4.  Structure
 * mirrors wino_sgemm_set() but with a 4-channel micro-kernel.
 *
 * Parameter meanings match wino_sgemm_set(); num_thread and cpu_affinity are
 * accepted but not used here (this function runs single-threaded).
 */
void wino_sgemm_4x4(const float* ker, const float* inp, float* output, const float* bias, int cin, int cout_start,
                    int cout_end, int block_h, int block_w, int out_h, int out_w, int resi_h, int resi_w,
                    int activation, int num_thread, int cpu_affinity)
{
    int block_hw = block_h * block_w;
    int out_hw = out_w * out_h;
    int p, i;
    // Wide outputs enable the fused 4-tile store path (as in wino_sgemm_set).
    int flag_outw = 1;
    if (out_w < 16)
        flag_outw = 0;
    const float* ker_ptr;
    const float* inp_ptr;
    // Channel groups of 4.
    for (p = (cout_start & -4); p < (cout_end & -4); p += 4)
    {
        ker_ptr = ker + p * ELEM_SIZE * cin;
        // 4 input tiles per iteration.
        for (i = 0; i < (block_hw & -4); i += 4)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[4 * 4 * ELEM_SIZE];
#ifdef __aarch64__
            // Tile coordinates of the 4 tiles in the block grid.
            int idx_h[4];
            int idx_w[4];
            idx_h[0] = (i) / block_w;
            idx_h[1] = (i + 1) / block_w;
            idx_h[2] = (i + 2) / block_w;
            idx_h[3] = (i + 3) / block_w;
            idx_w[0] = (i) % block_w;
            idx_w[1] = (i + 1) % block_w;
            idx_w[2] = (i + 2) % block_w;
            idx_w[3] = (i + 3) % block_w;
            int wino_out_4_tiles = 0;
            int mulitplier = 4; // NOTE(review): typo for "multiplier", kept as-is
            if (flag_outw)
                // Fast path: all 4 tiles on one row, none on a partial border.
                if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1)))
                {
                    wino_out_4_tiles = 1;
                    mulitplier = 1;
                }
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                {
                    wino_sgemm_4x4_A72(out_buffer + s * 4 * mulitplier, inp_ptr + s * 4 * cin, ker_ptr + s * 4 * cin,
                                       cin, wino_out_4_tiles);
                }
            }
            if (wino_out_4_tiles == 1)
            {
                // Fused transform + store of 4 adjacent tiles.
                float* bias_ptr = NULL;
                for (int pss = 0; pss < 4; pss++)
                {
                    int cout_idx = p + pss;
                    float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE;
                    if (bias)
                    {
                        bias_ptr = ( float* )(bias + cout_idx);
                    }
                    // Constants used by the assembler output transform.
                    float ker00[4] = {2, 4, 8, 0};
                    tran_out_4(out_buffer + pss * ELEM_SIZE * 4, out_ptr, out_w * sizeof(float), ker00, bias_ptr,
                               activation);
                }
            }
            else
            {
                // De-interleave GEMM results to [cout][tile][ELEM_SIZE].
                float buffer[4 * 4 * ELEM_SIZE];
                float* buffer_ptr0 = buffer;
                for (int pp = 0; pp < 4; pp++)
                {
                    for (int t = 0; t < 4; t++)
                    {
                        for (int ss = 0; ss < ELEM_SIZE; ss++)
                        {
                            *buffer_ptr0 = out_buffer[ss * 4 * 4 + pp * 4 + t];
                            buffer_ptr0++;
                        }
                    }
                }
                // end interleave
                // transform_output_f43_4tile((const float*)buffer, output, p, i, block_h, block_w, out_hw, out_w,
                // resi_h, resi_w,
                // KER_COUT_UNIT, bias, bias_term);
                {
                    float tmp_buffer[TILE * TILE];
                    const float* bias_ptr = NULL;
                    for (int pss = 0; pss < 4; pss++)
                    {
                        int cout_idx = p + pss;
                        float* out_ptr = output + cout_idx * out_hw;
                        if (bias)
                        {
                            bias_ptr = bias + cout_idx;
                        }
                        for (int ii = 0; ii < 4; ii++)
                        {
                            int i_h = idx_h[ii];
                            int j_w = idx_w[ii];
                            // Interior tiles write straight to the output;
                            // border tiles are clipped through tmp_buffer.
                            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                                (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
                            {
                                trans_output_f43(buffer + ii * ELEM_SIZE + pss * 36 * 4,
                                                 out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w,
                                                 ( const float* )bias_ptr, activation);
                            } // direct use_out_ptr
                            else
                            {
                                int ret_h = TILE - resi_h;
                                if (i_h < block_h - 1)
                                    ret_h = TILE;
                                int ret_w = TILE - resi_w;
                                if (j_w < block_w - 1)
                                    ret_w = TILE;
                                // tmp_buffer
                                trans_output_f43_ordinary(buffer + ii * ELEM_SIZE + pss * 36 * 4, tmp_buffer,
                                                          ( const float* )bias_ptr);
                                float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);
                                for (int hh = 0; hh < ret_h; hh++)
                                {
                                    for (int ww = 0; ww < ret_w; ww++)
                                    {
                                        out_pointer[hh * out_w + ww] =
                                            do_activation(tmp_buffer[hh * 4 + ww], activation);
                                    }
                                }
                            } // end else, tmp_buff
                        }
                    }
                }
                // end transform
            }
#else
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                wino_sgemm_4x4_A17(out_buffer + s * 4 * 4, inp_ptr + s * 4 * cin, ker_ptr + s * 4 * cin, cin);
            }
            // interleave
            float buffer[4 * 4 * ELEM_SIZE];
            float* buffer_ptr0 = buffer;
            for (int pp = 0; pp < 4; pp++)
            {
                for (int t = 0; t < 4; t++)
                {
                    for (int ss = 0; ss < ELEM_SIZE; ss++)
                    {
                        *buffer_ptr0 = out_buffer[ss * 4 * 4 + pp * 4 + t];
                        buffer_ptr0++;
                    }
                }
            }
            // end interleave
            transform_output_f43_4tile(buffer, output, p, i, block_h, block_w, out_h, out_w, resi_h, resi_w, 4, bias,
                                       activation);
#endif
        }
        // Tail: single tiles.
        for (; i < block_hw; i++)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float out_buffer[4 * ELEM_SIZE];
            for (int s = 0; s < ELEM_SIZE; s++)
            {
#ifdef __aarch64__
                wino_sgemm_1x4(out_buffer + s * 4, inp_ptr + s * cin, ker_ptr + s * 4 * cin, cin);
#else
                wino_sgemm_1x4_cpu(out_buffer + s * 4, inp_ptr + s * cin, ker_ptr + s * 4 * cin, cin);
#endif
            }
            // interleave
            float buffer[4 * ELEM_SIZE];
            float* buffer_ptr0 = buffer;
            for (int pp = 0; pp < 4; pp++)
            {
                for (int ss = 0; ss < ELEM_SIZE; ss++)
                {
                    *buffer_ptr0 = out_buffer[ss * 4 + pp];
                    buffer_ptr0++;
                }
            }
            // end interleave
            transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h,
                                       resi_w, 4, bias, activation);
            // end transform
        }
    }
    // Remaining channels past the last multiple of 4: plain scalar GEMM.
    for (p = (cout_end & -4); p < cout_end; p++)
    {
        ker_ptr = ker + p * ELEM_SIZE * cin;
        for (i = 0; i < (block_hw & -4); i += 4)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float buffer[4 * ELEM_SIZE];
            int idx_h[4];
            int idx_w[4];
            idx_h[0] = (i) / block_w;
            idx_h[1] = (i + 1) / block_w;
            idx_h[2] = (i + 2) / block_w;
            idx_h[3] = (i + 3) / block_w;
            idx_w[0] = (i) % block_w;
            idx_w[1] = (i + 1) % block_w;
            idx_w[2] = (i + 2) % block_w;
            idx_w[3] = (i + 3) % block_w;
            // gemm+interleave buffer[4][36]
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                float* inp_ = ( float* )(inp_ptr + s * 4 * cin);
                float* ker_ = ( float* )(ker_ptr + s * cin);
                float sum0 = 0;
                float sum1 = 0;
                float sum2 = 0;
                float sum3 = 0;
                for (int k = 0; k < cin; k++)
                {
                    sum0 += inp_[k * 4] * ker_[k];
                    sum1 += inp_[k * 4 + 1] * ker_[k];
                    sum2 += inp_[k * 4 + 2] * ker_[k];
                    sum3 += inp_[k * 4 + 3] * ker_[k];
                }
                buffer[s] = sum0;
                buffer[36 + s] = sum1;
                buffer[72 + s] = sum2;
                buffer[108 + s] = sum3;
            }
            // trans_out buffer[4][36]
            float tmp_buffer[TILE * TILE];
            const float* bias_ptr = NULL;
            float* out_ptr = output + p * out_hw;
            if (bias)
            {
                bias_ptr = bias + p;
            }
            for (int ii = 0; ii < 4; ii++)
            {
                int i_h = idx_h[ii];
                int j_w = idx_w[ii];
                // Same interior/border split as above.
                if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) ||
                    (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
                {
                    trans_output_f43(buffer + ii * ELEM_SIZE, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w,
                                     ( const float* )bias_ptr, activation);
                } // direct use_out_ptr
                else
                {
                    int ret_h = TILE - resi_h;
                    if (i_h < block_h - 1)
                        ret_h = TILE;
                    int ret_w = TILE - resi_w;
                    if (j_w < block_w - 1)
                        ret_w = TILE;
                    // tmp_buffer
                    trans_output_f43_ordinary(buffer + ii * ELEM_SIZE, tmp_buffer, ( const float* )bias_ptr);
                    float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);
                    for (int hh = 0; hh < ret_h; hh++)
                    {
                        for (int ww = 0; ww < ret_w; ww++)
                        {
                            out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                        }
                    }
                } // end else, tmp_buff
            } // end transform
        }
        for (; i < block_hw; i++)
        {
            inp_ptr = inp + i * ELEM_SIZE * cin;
            float buffer[ELEM_SIZE];
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                float* inp_ = ( float* )(inp_ptr + s * cin);
                float* ker_ = ( float* )(ker_ptr + s * cin);
                float sum = 0;
                for (int k = 0; k < cin; k++)
                {
                    sum += inp_[k] * ker_[k];
                }
                buffer[s] = sum;
            }
            // end interleave
            transform_output_f43_1tile(( const float* )buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h,
                                       resi_w, 1, bias, activation);
            // end transform
        }
    }
}
/* Bytes needed for the transformed-kernel buffer: out_c * in_c * ELEM_SIZE
 * floats plus 128 bytes of slack (caution: keep the slack, downstream code
 * appears to rely on it). `param` is unused but kept for interface symmetry. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int channels_out = filter->dims[0];
    int channels_in = filter->dims[1];
    int trans_ker_size = channels_out * channels_in * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128; // caution
}
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
int output_c = filter_tensor->dims[0];
int input_c = filter_tensor->dims[1];
int mem_size = get_private_mem_size(filter_tensor, param);
float* trans_mem = ( float* )sys_malloc(mem_size);
if (!priv_info->external_interleave_mem)
{
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
transform_kernel_f43_tile(filter_tensor, trans_mem);
interleave_kernel(trans_mem, ( float* )priv_info->interleave_buffer, output_c, input_c);
sys_free(trans_mem);
return 0;
}
/* Release the interleaved-kernel buffer if this convolution owns it
 * (i.e. it was not supplied externally). Always returns 0. */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    int owns_buffer = !priv_info->external_interleave_mem;
    if (owns_buffer && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL; /* guard against double free */
    }
    return 0;
}
/*
 * Run stage of the Winograd HCL convolution: pad the input, transform it to
 * Winograd domain, and run the two sgemm passes (channel groups of
 * PER_OUT_CHAN, then the remaining channels).
 *
 * Fixes:
 *  - the two sys_malloc() results were used without a NULL check; on failure
 *    we now free what was acquired and return -1;
 *  - removed dead locals (stride/dilation/kernel sizes, out_hw, out_c_align,
 *    col_buf) that were computed but never read.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                      struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* param */
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;

    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* wino param: tile grid and padded input geometry */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int block_hw = block_h * block_w;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;
    int padded_in_hw = padded_in_h * padded_in_w;

    /* buffer addr */
    float* input_buf = ( float* )input_tensor->data;
    float* output_buf = ( float* )output_tensor->data;
    float* biases_buf = NULL;
    if (bias_tensor != NULL)
        biases_buf = ( float* )bias_tensor->data;
    float* interleave_buf = ( float* )priv_info->interleave_buffer;

    float* input_padd_buf = ( float* )sys_malloc(sizeof(float) * padded_in_hw * in_c + 128);
    float* trans_input_buf = ( float* )sys_malloc(sizeof(float) * block_hw * in_c * ELEM_SIZE + 128);
    if (input_padd_buf == NULL || trans_input_buf == NULL)
    {
        if (input_padd_buf)
            sys_free(input_padd_buf);
        if (trans_input_buf)
            sys_free(trans_input_buf);
        return -1;
    }

    int nn_out_c = out_c / PER_OUT_CHAN * PER_OUT_CHAN;
    int nn_block = block_hw >> 2;
    int resi_block = nn_block << 2;
    /* rows/cols of the last tile row/column outside the real output */
    int resi_h = block_h * TILE - out_h;
    int resi_w = block_w * TILE - out_w;

    for (int n = 0; n < batch; n++)
    {
        float* input = input_buf + n * input_size;
        float* output = output_buf + n * output_size;

        /* PAD input */
        pad_input1(input, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_h0, pad_w0);

        /* trans input: full 4-tile groups, then the remainder */
        tran_input_4block(input_padd_buf, trans_input_buf, in_c, block_h, block_w, padded_in_h, padded_in_w);
        if (resi_block != block_hw)
        {
            tran_input_resi_block(input_padd_buf, trans_input_buf, in_c, nn_block, resi_block, block_hw, block_w,
                                  padded_in_hw, padded_in_w);
        }

        /* sdot: PER_OUT_CHAN channel groups, then leftover channels */
        wino_sgemm_set(interleave_buf, trans_input_buf, output, biases_buf, in_c, nn_out_c, block_h, block_w, out_h,
                       out_w, resi_h, resi_w, act_type, num_thread, cpu_affinity);
        if (nn_out_c != out_c)
        {
            wino_sgemm_4x4(interleave_buf, trans_input_buf, output, biases_buf, in_c, nn_out_c, out_c, block_h, block_w,
                           out_h, out_w, resi_h, resi_w, act_type, num_thread, cpu_affinity);
        }
    }

    sys_free(input_padd_buf);
    sys_free(trans_input_buf);
    return 0;
}
|
genome.c | /* =============================================================================
*
* genome.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "gene.h"
#include "random.h"
#include "segments.h"
#include "sequencer.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "vector.h"
enum param_types {
PARAM_GENE = (unsigned char)'g',
PARAM_NUMBER = (unsigned char)'n',
PARAM_SEGMENT = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
#define PARAM_DEFAULT_GENE (1L << 14)
#define PARAM_DEFAULT_NUMBER (1L << 22)
#define PARAM_DEFAULT_SEGMENT (1L << 6)
#define PARAM_DEFAULT_THREAD (1L)
long global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
/* Print the command-line help text for the genome benchmark and exit(1). */
static void
displayUsage (const char* programName)
{
    /* fixed table of help lines; formats are compile-time literals */
    static const struct {
        const char* format;
        long defaultValue;
    } optionHelp[] = {
        { " g <UINT> Length of [g]ene (%li)\n", PARAM_DEFAULT_GENE },
        { " n <UINT> Min [n]umber of segments (%li)\n", PARAM_DEFAULT_NUMBER },
        { " s <UINT> Length of [s]egment (%li)\n", PARAM_DEFAULT_SEGMENT },
        { " t <UINT> Number of [t]hreads (%li)\n", PARAM_DEFAULT_THREAD },
    };
    printf("Usage: %s [options]\n", programName);
    puts("\nOptions: (defaults)\n");
    for (unsigned long j = 0; j < sizeof(optionHelp) / sizeof(optionHelp[0]); j++) {
        printf(optionHelp[j].format, optionHelp[j].defaultValue);
    }
    puts("");
    puts("The actual number of segments created may be greater than -n");
    puts("in order to completely cover the gene.");
    exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
/* Reset every recognized option in global_params to its compiled-in default.
 * The assignments are independent, so the order is arbitrary. */
static void
setDefaultParams( void )
{
    global_params[PARAM_THREAD]  = PARAM_DEFAULT_THREAD;
    global_params[PARAM_SEGMENT] = PARAM_DEFAULT_SEGMENT;
    global_params[PARAM_NUMBER]  = PARAM_DEFAULT_NUMBER;
    global_params[PARAM_GENE]    = PARAM_DEFAULT_GENE;
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
/* Parse the command line into global_params.
 * Note: this file reuses getopt's `opterr` global as its own error counter
 * (setting it to 0 also silences getopt's diagnostics); any parse error or
 * stray non-option argument sends the user to displayUsage(), which exits. */
static void
parseArgs (long argc, char* const argv[])
{
    opterr = 0;
    setDefaultParams();

    for (;;) {
        long opt = getopt(argc, argv, "g:n:s:t:");
        if (opt == -1) {
            break;
        }
        if (opt == 'g' || opt == 'n' || opt == 's' || opt == 't') {
            /* option letter doubles as the index into global_params */
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            opterr++; /* '?' or anything unexpected */
        }
    }

    for (long i = optind; i < argc; i++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[i]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* main
* =============================================================================
*/
/* Benchmark driver. MAIN/MAIN_RETURN are macros from tm.h — presumably they
 * expand to the platform-appropriate main() signature; confirm in tm.h. */
MAIN (argc,argv)
{
    TIMER_T start;
    TIMER_T stop;

    /* Initialization */
    parseArgs(argc, (char** const)argv);
    SIM_GET_NUM_CPU(global_params[PARAM_THREAD]);

    printf("Creating gene and segments... ");
    fflush(stdout);

    long geneLength = global_params[PARAM_GENE];
    long segmentLength = global_params[PARAM_SEGMENT];
    long minNumSegment = global_params[PARAM_NUMBER];
    long numThread = global_params[PARAM_THREAD];

    /* Bring up the STM runtime, per-thread memory, and worker threads. */
    TM_STARTUP(numThread);
    P_MEMORY_STARTUP(numThread);
    thread_startup(numThread);

    /* Fixed seed => deterministic gene/segments across runs. */
    random_t* randomPtr = random_alloc();
    assert(randomPtr != NULL);
    random_seed(randomPtr, 0);

    gene_t* genePtr = gene_alloc(geneLength);
    assert( genePtr != NULL);
    gene_create(genePtr, randomPtr);
    char* gene = genePtr->contents;

    segments_t* segmentsPtr = segments_alloc(segmentLength, minNumSegment);
    assert(segmentsPtr != NULL);
    segments_create(segmentsPtr, genePtr, randomPtr);

    sequencer_t* sequencerPtr = sequencer_alloc(geneLength, segmentLength, segmentsPtr);
    assert(sequencerPtr != NULL);

    puts("done.");
    printf("Gene length = %li\n", genePtr->length);
    printf("Segment length = %li\n", segmentsPtr->length);
    printf("Number segments = %li\n", vector_getSize(segmentsPtr->contentsPtr));
    fflush(stdout);

    /* Benchmark */
    printf("Sequencing gene... ");
    fflush(stdout);
    // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
    // wallclock time, we want to be sure we read time inside the
    // simulator, or else we report native cycles spent on the benchmark
    // instead of simulator cycles.
    GOTO_SIM();
    TIMER_READ(start);
#ifdef OTM
#pragma omp parallel
    {
        sequencer_run(sequencerPtr);
    }
#else
    thread_start(sequencer_run, (void*)sequencerPtr);
#endif
    TIMER_READ(stop);
    // NB: As above, timer reads must be done inside of the simulated region
    // for PTLSim/ASF
    GOTO_REAL();
    puts("done.");
    printf("Time = %lf\n", TIMER_DIFF_SECONDS(start, stop));
    fflush(stdout);

    /* Check result: reassembled sequence should equal the original gene. */
    {
        char* sequence = sequencerPtr->sequence;
        int result = strcmp(gene, sequence);
        printf("Sequence matches gene: %s\n", (result ? "no" : "yes"));
        if (result) {
            printf("gene = %s\n", gene);
            printf("sequence = %s\n", sequence);
        }
        fflush(stdout);
        /* sequence can only be longer than gene, never shorter */
        assert(strlen(sequence) >= strlen(gene));
    }

    /* Clean up */
    printf("Deallocating memory... ");
    fflush(stdout);
    sequencer_free(sequencerPtr);
    segments_free(segmentsPtr);
    gene_free(genePtr);
    random_free(randomPtr);
    puts("done.");
    fflush(stdout);

    TM_SHUTDOWN();
    P_MEMORY_SHUTDOWN();
    thread_shutdown();

    MAIN_RETURN(0);
}
/* =============================================================================
*
* End of genome.c
*
* =============================================================================
*/
|
scheduleg-clause.c | /*
El tipo guiado es lo mismo que el tipo dinamico solo que repartimos de forma max(n/h,chunk)
si tenemos 20 iteraciones y 3 hebras => n = 20, h = 3
chunk = 2
- a la primera hebra libre le damos (20/3, 2) => (round(6.66666) = 7, 2) le damos 7 => n = 20 - 7 = 13
- a la siguiente hebra libre le damos (13/3, 2) => (round(4.3333) = 4,2) => le damos 4 => n = 13 - 4 = 9
.... etc
$ ./bin/scheduleg-clause 20 2
thread 0 suma a[0]=0 suma=0
thread 0 suma a[1]=1 suma=1
thread 0 suma a[2]=2 suma=3
thread 0 suma a[3]=3 suma=6
thread 0 suma a[4]=4 suma=10
thread 0 suma a[5]=5 suma=15
thread 0 suma a[6]=6 suma=21
thread 0 suma a[15]=15 suma=36
thread 0 suma a[16]=16 suma=52
thread 0 suma a[17]=17 suma=69
thread 0 suma a[18]=18 suma=87
thread 0 suma a[19]=19 suma=106
thread 2 suma a[12]=12 suma=12
thread 2 suma a[13]=13 suma=25
thread 2 suma a[14]=14 suma=39
thread 1 suma a[7]=7 suma=7
thread 1 suma a[8]=8 suma=15
thread 1 suma a[9]=9 suma=24
thread 1 suma a[10]=10 suma=34
thread 1 suma a[11]=11 suma=45
Fuera de 'parallel for' suma=106
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * Demo of OpenMP schedule(guided,chunk) on a parallel-for reduction-like sum.
 * argv[1] = iteration count (capped at 20, the size of a[]), argv[2] = chunk.
 *
 * Fix: the function was declared with an implicit-int return type
 * ("main(...)"), which is invalid since C99; it is now "int main" and
 * returns 0 explicitly.
 */
int main(int argc, char **argv) {
    /* a[n] is sized with n == 20 at declaration time, before n is re-read
     * from argv, so the array always holds 20 elements (n is capped at 20). */
    int i, n = 20, chunk, a[n], suma = 0;

    if (argc < 3) {
        fprintf(stderr, "\nFalta iteraciones y/o chunk \n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n > 20)
        n = 20;
    chunk = atoi(argv[2]);

    for (i = 0; i < n; i++)
        a[i] = i;

    //omp_set_num_threads(3);
    /* suma is firstprivate (each thread starts from 0) and lastprivate
     * (the value from the thread running the last iteration survives). */
#pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(guided, chunk)
    for (i = 0; i < n; i++) {
        suma = suma + a[i];
        printf(" thread %d suma a[%d]=%d suma=%d \n",
               omp_get_thread_num(), i, a[i], suma);
    }
    printf("Fuera de 'parallel for' suma=%d\n", suma);
    return 0;
}
imm.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_IMM_H
#define RIPPLES_IMM_H
#include <cmath>
#include <cstddef>
#include <limits>
#include <unordered_map>
#include <vector>
#include "nlohmann/json.hpp"
#include "trng/lcg64.hpp"
#include "trng/uniform01_dist.hpp"
#include "trng/uniform_int_dist.hpp"
#include "ripples/configuration.h"
#include "ripples/find_most_influential.h"
#include "ripples/generate_rrr_sets.h"
#include "ripples/imm_execution_record.h"
#include "ripples/tim.h"
#include "ripples/utility.h"
#include "ripples/streaming_rrr_generator.h"
#define CUDA_PROFILE 0
namespace ripples {
//! The IMM algorithm configuration descriptor.
//! Extends the TIM configuration with the streaming (CPU+GPU) engine knobs.
struct IMMConfiguration : public TIMConfiguration {
  size_t streaming_workers{0};        //!< Total workers for the streaming engine.
  size_t streaming_gpu_workers{0};    //!< How many of the workers drive a GPU.
  //! Cap on workers used during seed selection (default: unlimited).
  size_t seed_select_max_workers{std::numeric_limits<size_t>::max()};
  size_t seed_select_max_gpu_workers{0};  //!< Cap on GPU workers for seed selection.
  //! Raw --streaming-gpu-mapping value; parsed elsewhere into worker_to_gpu.
  std::string gpu_mapping_string{""};
  std::unordered_map<size_t, size_t> worker_to_gpu;  //!< OpenMP worker id -> GPU id.

  //! \brief Add command line options to configure IMM.
  //!
  //! \param app The command-line parser object.
  void addCmdOptions(CLI::App &app) {
    // Inherit the TIM options first, then add the streaming-engine group.
    TIMConfiguration::addCmdOptions(app);
    app.add_option(
           "--streaming-gpu-workers", streaming_gpu_workers,
           "The number of GPU workers for the CPU+GPU streaming engine.")
        ->group("Streaming-Engine Options");
    app.add_option("--streaming-gpu-mapping", gpu_mapping_string,
                   "A comma-separated set of OpenMP numbers for GPU workers.")
        ->group("Streaming-Engine Options");
    app.add_option("--seed-select-max-workers", seed_select_max_workers,
                   "The max number of workers for seed selection.")
        ->group("Streaming-Engine Options");
    app.add_option("--seed-select-max-gpu-workers", seed_select_max_gpu_workers,
                   "The max number of GPU workers for seed selection.")
        ->group("Streaming-Engine Options");
  }
};
//! Retrieve the configuration parsed from command line.
//! \return the configuration parsed from command line.
ToolConfiguration<ripples::IMMConfiguration> configuration();
//! Approximate logarithm of n choose k (Stirling-style estimate).
//! \param n total number of elements.
//! \param k number of elements chosen.
//! \return an approximation of log(n choose k).
//!
//! Fix: the boundary cases k == 0 and k == n previously evaluated
//! k * log(k) as 0 * log(0) = 0 * (-inf), producing NaN; the exact value
//! log(C(n,0)) = log(C(n,n)) = log(1) = 0 is now returned instead.
inline double logBinomial(size_t n, size_t k) {
  if (k == 0 || k == n) return 0.0;
  return n * log(n) - k * log(k) - (n - k) * log(n - k);
}
//! Compute ThetaPrime (Equation 9 of the IMM paper).
//!
//! \tparam execution_tag The execution policy (unused here; kept so callers
//!         can dispatch on it).
//!
//! \param x The index of the current iteration.
//! \param epsilonPrime Parameter controlling the approximation factor.
//! \param l Parameter usually set to 1.
//! \param k The size of the seed set (clamped to num_nodes/2).
//! \param num_nodes The number of nodes in the input graph.
template <typename execution_tag>
ssize_t ThetaPrime(ssize_t x, double epsilonPrime, double l, size_t k,
                   size_t num_nodes, execution_tag &&) {
  k = std::min(k, num_nodes / 2);
  double lead = 2 + 2. / 3. * epsilonPrime;
  double logTerm = l * std::log(num_nodes) + logBinomial(num_nodes, k) +
                   std::log(std::log2(num_nodes));
  double scale = std::pow(2.0, x);
  return lead * logTerm * scale / (epsilonPrime * epsilonPrime);
}
//! Compute Theta, the number of RRR samples required.
//!
//! \param epsilon Parameter controlling the approximation factor.
//! \param l Parameter usually set to 1.
//! \param k The size of the seed set (clamped to num_nodes/2).
//! \param LB The estimate of the lower bound.
//! \param num_nodes The number of nodes in the input graph.
inline size_t Theta(double epsilon, double l, size_t k, double LB,
                    size_t num_nodes) {
  // No lower-bound estimate yet: no samples required.
  if (LB == 0) return 0;

  k = std::min(k, num_nodes / 2);

  double term1 = 0.6321205588285577;  // 1 - 1/e
  double logN = std::log(num_nodes);
  double logTwo = std::log(2);
  double alpha = sqrt(l * logN + logTwo);
  double beta = sqrt(term1 * (logBinomial(num_nodes, k) + l * logN + logTwo));
  double factor = term1 * alpha + beta;
  double lamdaStar = 2 * num_nodes * factor * factor * pow(epsilon, -2);
  return lamdaStar / LB;
}
//! Collect a set of Random Reverse Reachable sets.
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam RRRGeneratorTy The type of the RRR generator.
//! \tparam diff_model_tag Type-Tag to select the diffusion model.
//! \tparam execution_tag Type-Tag to select the execution policy.
//!
//! \param G The input graph. The graph is transposed.
//! \param CFG Tool configuration (provides k and epsilon).
//! \param l Parameter usually set to 1.
//! \param generator The rrr sets generator.
//! \param record Data structure storing timing and event counts.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The execution policy tag.
//! \return the vector of generated RRR sets.
template <typename GraphTy, typename ConfTy, typename RRRGeneratorTy,
          typename diff_model_tag, typename execution_tag>
auto Sampling(const GraphTy &G, const ConfTy &CFG, double l,
              RRRGeneratorTy &generator, IMMExecutionRecord &record,
              diff_model_tag &&model_tag, execution_tag &&ex_tag) {
  using vertex_type = typename GraphTy::vertex_type;
  size_t k = CFG.k;
  double epsilon = CFG.epsilon;

  // sqrt(2) * epsilon
  double epsilonPrime = 1.4142135623730951 * epsilon;

  double LB = 0;
#ifdef ENABLE_MEMKIND
  // Prefer persistent-memory-backed allocation when memkind is enabled.
  RRRsetAllocator<vertex_type> allocator(libmemkind::kinds::DAX_KMEM_PREFERRED);
#else
  RRRsetAllocator<vertex_type> allocator;
#endif
  std::vector<RRRset<GraphTy>> RR;

  auto start = std::chrono::high_resolution_clock::now();
  // NOTE(review): shadowed by the loop-local thetaPrime below and never
  // read afterwards — looks like dead code; confirm before removing.
  size_t thetaPrime = 0;
  // Doubling search for the lower bound LB (at most log2(n) rounds).
  for (ssize_t x = 1; x < std::log2(G.num_nodes()); ++x) {
    // Equation 9
    ssize_t thetaPrime = ThetaPrime(x, epsilonPrime, l, k, G.num_nodes(),
                                    std::forward<execution_tag>(ex_tag));
    // Grow RR up to thetaPrime sets; previously generated sets are reused.
    size_t delta = thetaPrime - RR.size();
    record.ThetaPrimeDeltas.push_back(delta);
    auto timeRRRSets = measure<>::exec_time([&]() {
      RR.insert(RR.end(), delta, RRRset<GraphTy>(allocator));
      auto begin = RR.end() - delta;
      GenerateRRRSets(G, generator, begin, RR.end(), record,
                      std::forward<diff_model_tag>(model_tag),
                      std::forward<execution_tag>(ex_tag));
    });
    record.ThetaEstimationGenerateRRR.push_back(timeRRRSets);
    // f = fraction of RRR sets covered by the current best seed set.
    double f;
    auto timeMostInfluential = measure<>::exec_time([&]() {
      const auto &S =
          FindMostInfluentialSet(G, CFG, RR, record, generator.isGpuEnabled(),
                                 std::forward<execution_tag>(ex_tag));
      f = S.first;
    });
    record.ThetaEstimationMostInfluential.push_back(timeMostInfluential);
    // Stop as soon as the coverage passes the 2^-x threshold.
    if (f >= std::pow(2, -x)) {
      // std::cout << "Fraction " << f << std::endl;
      LB = (G.num_nodes() * f) / (1 + epsilonPrime);
      break;
    }
  }

  size_t theta = Theta(epsilon, l, k, LB, G.num_nodes());
  auto end = std::chrono::high_resolution_clock::now();
  record.ThetaEstimationTotal = end - start;
  record.Theta = theta;
  std::cout << G.num_nodes() << "," << theta << std::endl;

  // Top up RR to theta sets (only if the estimation phase undershot).
  record.GenerateRRRSets = measure<>::exec_time([&]() {
    if (theta > RR.size()) {
      size_t final_delta = theta - RR.size();
      RR.insert(RR.end(), final_delta, RRRset<GraphTy>(allocator));
      auto begin = RR.end() - final_delta;
      GenerateRRRSets(G, generator, begin, RR.end(), record,
                      std::forward<diff_model_tag>(model_tag),
                      std::forward<execution_tag>(ex_tag));
    }
  });
  return RR;
}
//! Collect a set of Random Reverse Reachable sets (sequential policy).
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam ConfTy The configuration type.
//! \tparam RRRGeneratorTy The type of the RRR generator.
//! \tparam diff_model_tag Type-Tag to select the diffusion model.
//!
//! \param G The input graph.  The graph is transposed.
//! \param CFG The configuration (provides the seed-set size k and epsilon).
//! \param l Parameter usually set to 1.
//! \param generator The rrr sets generator.
//! \param record Data structure storing timing and event counts.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The sequential execution policy tag.
//!
//! \return The vector of generated RRR sets.
template <typename GraphTy, typename ConfTy, typename RRRGeneratorTy,
          typename diff_model_tag>
auto Sampling(const GraphTy &G, const ConfTy &CFG, double l,
              RRRGeneratorTy &generator, IMMExecutionRecord &record,
              diff_model_tag &&model_tag, sequential_tag &&ex_tag) {
  using vertex_type = typename GraphTy::vertex_type;
  size_t k = CFG.k;
  double epsilon = CFG.epsilon;
  // sqrt(2) * epsilon
  double epsilonPrime = 1.4142135623730951 * epsilon;
  double LB = 0;
#ifdef ENABLE_MEMKIND
  RRRsetAllocator<vertex_type> allocator(libmemkind::kinds::DAX_KMEM_PREFERRED);
#else
  RRRsetAllocator<vertex_type> allocator;
#endif
  std::vector<RRRset<GraphTy>> RR;
  auto start = std::chrono::high_resolution_clock::now();
  // Estimation phase: grow the sample until f certifies a lower bound LB.
  for (ssize_t x = 1; x < std::log2(G.num_nodes()); ++x) {
    // Equation 9
    ssize_t thetaPrime = ThetaPrime(x, epsilonPrime, l, k, G.num_nodes(),
                                    std::forward<sequential_tag>(ex_tag));
    size_t delta = thetaPrime - RR.size();
    record.ThetaPrimeDeltas.push_back(delta);
    auto timeRRRSets = measure<>::exec_time([&]() {
      RR.insert(RR.end(), delta, RRRset<GraphTy>(allocator));
      auto begin = RR.end() - delta;
      GenerateRRRSets(G, generator, begin, RR.end(), record,
                      std::forward<diff_model_tag>(model_tag),
                      std::forward<sequential_tag>(ex_tag));
    });
    record.ThetaEstimationGenerateRRR.push_back(timeRRRSets);
    double f = 0.0;  // coverage fraction of the best seed set on RR
    auto timeMostInfluential = measure<>::exec_time([&]() {
      const auto &S = FindMostInfluentialSet(
          G, CFG, RR, record, false, std::forward<sequential_tag>(ex_tag));
      f = S.first;
    });
    record.ThetaEstimationMostInfluential.push_back(timeMostInfluential);
    if (f >= std::pow(2, -x)) {
      LB = (G.num_nodes() * f) / (1 + epsilonPrime);
      break;
    }
  }
  size_t theta = Theta(epsilon, l, k, LB, G.num_nodes());
  auto end = std::chrono::high_resolution_clock::now();
  record.ThetaEstimationTotal = end - start;
  record.Theta = theta;
  // Generation phase: top the sample up to theta RRR sets.
  record.GenerateRRRSets = measure<>::exec_time([&]() {
    if (theta > RR.size()) {
      size_t final_delta = theta - RR.size();
      RR.insert(RR.end(), final_delta, RRRset<GraphTy>(allocator));
      auto begin = RR.end() - final_delta;
      GenerateRRRSets(G, generator, begin, RR.end(), record,
                      std::forward<diff_model_tag>(model_tag),
                      std::forward<sequential_tag>(ex_tag));
    }
  });
  return RR;
}
//! The IMM algorithm for Influence Maximization (sequential).
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam ConfTy The configuration type.
//! \tparam PRNG The type of the parallel random number generator.
//! \tparam diff_model_tag Type-Tag to select the diffusion model.
//!
//! \param G The input graph.  The graph is transposed.
//! \param CFG The configuration.
//! \param l Parameter usually set to 1.
//! \param gen The parallel random number generator.
//! \param record Data structure storing timing and event counts.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The execution policy tag.
//!
//! \return The selected seed set.
template <typename GraphTy, typename ConfTy, typename PRNG,
          typename diff_model_tag>
auto IMM(const GraphTy &G, const ConfTy &CFG, double l, PRNG &gen,
         IMMExecutionRecord &record, diff_model_tag &&model_tag,
         sequential_tag &&ex_tag) {
  // The sequential pipeline drives a single PRNG stream.
  std::vector<trng::lcg64> generator(1, gen);
  // Rescale l so the union bound over the log2(n) guesses of OPT holds.
  l = l * (1 + 1 / std::log2(G.num_nodes()));
  auto R = Sampling(G, CFG, l, generator, record,
                    std::forward<diff_model_tag>(model_tag),
                    std::forward<sequential_tag>(ex_tag));
#if CUDA_PROFILE
  auto logst = spdlog::stdout_color_st("IMM-profile");
  std::vector<size_t> rrr_sizes;
  for (auto &rrr_set : R) rrr_sizes.push_back(rrr_set.size());
  print_profile_counter(logst, rrr_sizes, "RRR sizes");
#endif
  auto start = std::chrono::high_resolution_clock::now();
  const auto &S = FindMostInfluentialSet(G, CFG, R, record, false,
                                         std::forward<sequential_tag>(ex_tag));
  auto end = std::chrono::high_resolution_clock::now();
  record.FindMostInfluentialSet = end - start;
  return S.second;
}
//! The IMM algorithm for Influence Maximization (OpenMP parallel).
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam ConfTy The configuration type.
//! \tparam GeneratorTy The type of the parallel RRR generator.
//! \tparam diff_model_tag Type-Tag to select the diffusion model.
//!
//! \param G The input graph.  The graph is transposed.
//! \param CFG The configuration.
//! \param l Parameter usually set to 1.
//! \param gen The parallel random number generator.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The execution policy tag.
//!
//! \return The selected seed set.
template <typename GraphTy, typename ConfTy, typename GeneratorTy,
          typename diff_model_tag>
auto IMM(const GraphTy &G, const ConfTy &CFG, double l, GeneratorTy &gen,
         diff_model_tag &&model_tag, omp_parallel_tag &&ex_tag) {
  using vertex_type = typename GraphTy::vertex_type;
  auto &record(gen.execution_record());
  // Rescale l so the union bound over the log2(n) guesses of OPT holds.
  l = l * (1 + 1 / std::log2(G.num_nodes()));
  auto R =
      Sampling(G, CFG, l, gen, record, std::forward<diff_model_tag>(model_tag),
               std::forward<omp_parallel_tag>(ex_tag));
#if CUDA_PROFILE
  auto logst = spdlog::stdout_color_st("IMM-profile");
  std::vector<size_t> rrr_sizes;
  size_t sizeBytes = 0;
  for (auto &rrr_set : R) {
    rrr_sizes.push_back(rrr_set.size());
    sizeBytes += rrr_set.size() * sizeof(rrr_set[0]);
  }
  record.RRRSetSize = sizeBytes;
  print_profile_counter(logst, rrr_sizes, "RRR sizes");
#endif
  auto start = std::chrono::high_resolution_clock::now();
  const auto &S =
      FindMostInfluentialSet(G, CFG, R, record, gen.isGpuEnabled(),
                             std::forward<omp_parallel_tag>(ex_tag));
  auto end = std::chrono::high_resolution_clock::now();
  record.FindMostInfluentialSet = end - start;
  start = std::chrono::high_resolution_clock::now();
  // Account the total memory footprint of the RRR sample.
  size_t total_size = 0;
#pragma omp parallel for reduction(+:total_size)
  for (size_t i = 0; i < R.size(); ++i) {
    total_size += R[i].size() * sizeof(vertex_type);
  }
  record.RRRSetSize = total_size;
  end = std::chrono::high_resolution_clock::now();
  // NOTE(review): Total here only covers the footprint accounting above,
  // not the full run — confirm this matches the record's intent.
  record.Total = end - start;
  return S.second;
}
} // namespace ripples
#endif // RIPPLES_IMM_H
|
ordered2.c | #include <stdio.h>
#include <omp.h>
int main(void)
{
    int idx = 0;
    int tid;
    /* The iterations execute in parallel, but the ordered region below
       forces the prints to appear in ascending iteration order. */
#pragma omp parallel for ordered private(tid)
    for (idx = 0; idx < 100; idx++) {
        tid = omp_get_thread_num();
#pragma omp ordered
        {
            printf("I am i=%d in thread %d\n", idx, tid);
        }
    }
    return 0;
}
|
omp_barrier.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
// Demonstrates #pragma omp barrier: before the barrier, thread 1 races with
// thread 0's write to x; after the barrier every thread must observe x == 5.
int main()
{
    int x=2;
#pragma omp parallel num_threads(2) shared(x)
    {
        if (omp_get_thread_num()==0)
        {
            x = 5;  // thread 0 updates the shared variable
        }
        else
        {
            // NOTE(review): intentional data race for the demo — this read
            // is unsynchronized with thread 0's write, so it may print 2 or 5.
            printf("1 : Thread %d : x = %d\n", omp_get_thread_num(), x);
        }
#pragma omp barrier
        // The barrier (with its implied flush) guarantees both threads
        // now see x == 5.
        if (omp_get_thread_num() == 0)
        {
            printf("2 : Thread %d : x = %d\n", omp_get_thread_num(), x);
        }
        else
        {
            printf("3 : Thread %d : x = %d\n", omp_get_thread_num(), x);
        }
    }
    return 0;
} |
Q2_Solution_Batch.h | #pragma once
#include <queue>
#include <algorithm>
#include <cassert>
#include <numeric>
#include <memory>
#include <set>
#include "utils.h"
#include "load.h"
#include "Q2_Solution.h"
class Q2_Solution_Batch : public Q2_Solution {
protected:
static std::vector<uint64_t>
convert_score_type_to_comment_id(const std::vector<score_type> &top_scores, const Q2_Input &input) {
std::vector<uint64_t> top_scores_vector;
top_scores_vector.reserve(top_count);
// convert row indices to original comment IDs
std::transform(top_scores.rbegin(), top_scores.rend(), std::back_inserter(top_scores_vector),
[&input](const auto &score_tuple) {
return input.comments[std::get<2>(score_tuple)].comment_id;
});
return top_scores_vector;
}
static inline void
compute_score_for_comment(const Q2_Input &input, GrB_Index comment_col, const GrB_Index *likes_comment_array_begin,
const GrB_Index *likes_comment_array_end, const GrB_Index *likes_user_array_begin,
std::vector<score_type> &top_scores) __attribute__ ((always_inline)) {
// find tuple sequences of each comment in row-major array
// users liking a comment are stored consecutively
auto[likes_comment_begin, likes_comment_end] = std::equal_range(likes_comment_array_begin,
likes_comment_array_end, comment_col);
if (likes_comment_begin != likes_comment_end) {
GrB_Index likes_count = std::distance(likes_comment_begin, likes_comment_end);
// get position of first user liking that comment
const GrB_Index *likes_user_begin =
likes_user_array_begin + std::distance(likes_comment_array_begin, likes_comment_begin);
// extract friendships submatrix of users liking the comment
GBxx_Object<GrB_Matrix> friends_overlay_graph = GB(GrB_Matrix_new, GrB_BOOL, likes_count, likes_count);
ok(GrB_Matrix_extract(friends_overlay_graph.get(), GrB_NULL, GrB_NULL,
input.friends_matrix.get(),
likes_user_begin, likes_count, likes_user_begin, likes_count,
GrB_NULL));
// assuming that all component_ids will be in [0, n)
GBxx_Object<GrB_Vector> components_vector = GB(LAGraph_cc_fastsv, friends_overlay_graph.get(), false);
GrB_Index nvals;
#ifndef NDEBUG
nvals = input.likes_num;
ok(GrB_Vector_nvals(&nvals, components_vector.get()));
assert(nvals == likes_count);
GrB_Index n;
ok(GrB_Vector_size(&n, components_vector.get()));
assert(n == likes_count);
#endif
std::vector<uint64_t> components(likes_count),
component_sizes(likes_count);
// nullptr: SuiteSparse extension
nvals = likes_count;
ok(GrB_Vector_extractTuples_UINT64(nullptr, components.data(), &nvals, components_vector.get()));
assert(nvals == likes_count);
// count size of each component
for (auto component_id:components)
++component_sizes[component_id];
std::transform(component_sizes.begin(), component_sizes.end(), component_sizes.begin(),
[](uint64_t n) { return n * n; });
uint64_t score = std::accumulate(component_sizes.begin(), component_sizes.end(), uint64_t());
add_score_to_toplist(top_scores,
std::make_tuple(score, input.comments[comment_col].timestamp, comment_col));
}
}
public:
using Q2_Solution::Q2_Solution;
virtual void compute_score_for_all_comments(const GrB_Index *likes_comment_array_begin,
const GrB_Index *likes_comment_array_end,
const GrB_Index *likes_user_array_begin,
std::vector<score_type> &top_scores) const {
int nthreads = LAGraph_get_nthreads();
#pragma omp parallel num_threads(nthreads)
{
std::vector<score_type> top_scores_local;
#pragma omp for schedule(dynamic)
for (GrB_Index comment_col = 0; comment_col < input.comments_size(); ++comment_col) {
compute_score_for_comment(input, comment_col, likes_comment_array_begin, likes_comment_array_end,
likes_user_array_begin, top_scores_local);
}
#pragma omp critical(Q2_add_score_to_toplist)
for (auto score : top_scores_local) {
add_score_to_toplist(top_scores, score);
}
}
}
std::vector<score_type> calculate_score() {
std::vector<score_type> top_scores;
std::unique_ptr<GrB_Index[]> likes_trg_comment_columns{new GrB_Index[input.likes_num]},
likes_src_user_columns{new GrB_Index[input.likes_num]};
GrB_Index *likes_comment_array_begin = likes_trg_comment_columns.get(),
*likes_comment_array_end = likes_trg_comment_columns.get() + input.likes_num,
*likes_user_array_begin = likes_src_user_columns.get();
// nullptr to avoid extracting matrix values (SuiteSparse extension)
GrB_Index nvals = input.likes_num;
// extract likes edges row-wise, users liking a comment are stored consecutively
ok(GrB_Matrix_extractTuples_BOOL(likes_trg_comment_columns.get(), likes_src_user_columns.get(), nullptr, &nvals,
input.likes_matrix_tran.get()));
assert(nvals == input.likes_num);
compute_score_for_all_comments(likes_comment_array_begin, likes_comment_array_end, likes_user_array_begin,
top_scores);
// if comments with likes are not enough collect comments without like
if (top_scores.size() < top_count) {
for (GrB_Index comment_col = 0; comment_col < input.comments_size(); ++comment_col) {
if (std::none_of(top_scores.begin(), top_scores.end(),
[comment_col](auto const &tuple) { return std::get<2>(tuple) == comment_col; })) {
// try to add this comment if not present
add_score_to_toplist(top_scores,
std::make_tuple(0, input.comments[comment_col].timestamp, comment_col));
}
}
}
sort_top_scores(top_scores);
return top_scores;
}
std::vector<uint64_t> initial_calculation() override {
return convert_score_type_to_comment_id(calculate_score(), input);
}
std::vector<uint64_t> update_calculation(int iteration, const Q2_Input::Update_Type ¤t_updates) override {
return convert_score_type_to_comment_id(calculate_score(), input);
}
};
|
parallel-reduction2.c | #include<assert.h>
#include<omp.h>
#include<stdio.h>
// Contrasts reduction(+:sum) with private(sum):
// - with reduction, each thread adds i into a private copy initialized to 0
//   and the copies are combined into the original sum (100 + 100*nthreads);
// - with private, threads work on separate copies and the outer sum is left
//   untouched at 100, so the second assert holds.
int main(void)
{
    int i =100, sum=100;
    int thread_num;
#pragma omp parallel reduction(+:sum)
    {
#pragma omp single
        {
            thread_num = omp_get_num_threads();  // recorded once; shared
        }
        sum += i;  // added into the per-thread reduction copy
    }
    printf("thread num=%d sum =%d\n", thread_num, sum);
    assert(sum == (i*thread_num + 100));
    sum = 100;
#pragma omp parallel private(sum)
    {
#pragma omp single
        {
            thread_num = omp_get_num_threads();
        }
        // NOTE(review): the private sum is uninitialized here, so this
        // increment reads an indeterminate value — undefined behavior
        // strictly speaking, though harmless for this demo since the
        // private copies are discarded.
        sum += i;
    }
    printf("thread num=%d sum =%d\n", thread_num, sum);
    assert(sum != (i*thread_num + 100));  // outer sum is still 100
    return 0;
}
|
GB_unop__lnot_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_uint64_uint64
// op(A') function: GB_unop_tran__lnot_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT operator elementwise: Cx [p] = !(Ax [p] != 0).
// NOTE: this file is auto-generated (see header); comments only, no code
// changes.
GrB_Info GB_unop_apply__lnot_uint64_uint64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // one independent entry per iteration; static schedule balances the
    // uniform per-entry work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        uint64_t z = aij ;
        Cx [p] = !(z != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose A and apply LNOT, via the shared transpose
// template GB_unop_transpose.c (phase 2), specialized through the GB_* macros
// defined earlier in this file.  Auto-generated; comments only.
GrB_Info GB_unop_tran__lnot_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main.c | /*
* Main program
*
* This file is a part of solution of a Intel Winter Summer School problem
* Copyright (c) 2010 Roman Tsisyk <roman@tsisyk.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h> // gettimeofday
#include <omp.h>
#include "matmul.h"
#ifdef USE_INT
static const char *input_format="%ld";
#else
static const char *input_format="%lf";
#endif
static const char *size_format="%llu";
/*
* Benchmark utlis
*/
/* Wall-clock timestamp of the most recent bench_start() call. */
struct timeval start_time;

/* Records the current wall-clock time as the start of a benchmark section.
 * ((void) prototype: the old empty parens declared an unprototyped
 * function in pre-C23 C.) */
static void bench_start(void) {
    gettimeofday(&start_time, NULL);
}
/* Prints the seconds elapsed since the matching bench_start() call,
 * with microsecond resolution, as "%0.4f seconds\n". */
static void bench_end(void) {
    struct timeval end_time;
    gettimeofday(&end_time, NULL);
    double delta = (double) end_time.tv_sec - start_time.tv_sec;
    delta += (end_time.tv_usec - start_time.tv_usec) * 1e-6;
    printf("%0.4f seconds\n", delta);
}
/*
* Reads matrix from file
*/
/*
 * Reads a matrix from a text file: height and width first, then
 * height*width values in row-major order.
 *
 * Returns a malloc'ed buffer owned by the caller (free() it), or NULL on
 * open/parse/allocation failure; *out_height/*out_width are set only on
 * success.  (The old code never checked fopen/malloc and could not actually
 * return the NULL the caller tests for.)
 */
static data_t *read_matrix_r(const char *path, size_t *out_height, size_t *out_width) {
    FILE *f = fopen(path, "r");
    if (NULL == f)
        return NULL;
    /* size_format is "%llu": scan into unsigned long long and assign to
     * size_t — scanning straight into size_t is UB on platforms where
     * size_t is not unsigned long long. */
    unsigned long long height = 0;
    unsigned long long width = 0;
    if (fscanf(f, size_format, &height) != 1 ||
        fscanf(f, size_format, &width) != 1) {
        fclose(f);
        return NULL;
    }
    data_t *M = malloc(sizeof(data_t) * (size_t) height * (size_t) width);
    if (NULL == M) {
        fclose(f);
        return NULL;
    }
    data_t *v = M;
    data_t *end = M + (size_t) height * (size_t) width;
    while (!feof(f) && v < end) {
        if (fscanf(f, input_format, v++) != 1)
            break;  /* malformed value: stop reading, keep what we have */
    }
    fclose(f);
    *out_height = (size_t) height;
    *out_width = (size_t) width;
    return M;
}
/*
* Writes result to file
*/
/*
 * Writes "<value> <max_i> <max_j>\n" to path.  Reports and returns on open
 * failure instead of dereferencing a NULL stream (there is no error channel
 * for the caller, so this stays best-effort).
 */
static void save_result(const char *path, data_t value, size_t max_i,
        size_t max_j) {
    FILE *f = fopen(path, "w");
    if (NULL == f) {
        fprintf(stderr, "Cannot open '%s' for writing!\n", path);
        return;
    }
    fprintf(f, input_format, value);
    fputc(' ', f);
    /* size_format is "%llu": widen size_t explicitly so the format matches
     * the argument type. */
    fprintf(f, size_format, (unsigned long long) max_i);
    fputc(' ', f);
    fprintf(f, size_format, (unsigned long long) max_j);
    fputc('\n', f);
    fclose(f);
}
/*
* Solve WinterSchool 2010 problem ( max{A x B} )
*/
/*
 * Solve WinterSchool 2010 problem ( max{A x B} ).
 *
 * Multiplies A (heightA x widthA) by B (widthA x widthB), then finds the
 * maximum element of the product in parallel.
 *
 * Returns the maximum value; *out_max_i / *out_max_j (if non-NULL) receive
 * its row/column indices.
 *
 * Fixes vs the previous revision: the product buffer R is now freed (it was
 * leaked, and the returned *max pointed into it); bench_start() is called
 * for both the MKL and the internal path (previously only under WITH_MKL,
 * so the "Multiplication:" timing was bogus otherwise); malloc is checked;
 * the long commented-out alternative reduction was removed as dead code.
 */
static data_t ws_problem(size_t heightA, size_t widthA, size_t widthB,
        data_t *A, data_t *B, size_t *out_max_i, size_t *out_max_j) {
    data_t *R = malloc(sizeof(data_t) * heightA * widthB);
    if (NULL == R) {
        fprintf(stderr, "Out of memory!\n");
        exit(1);
    }
    bench_start();
#ifdef WITH_MKL
    // use MKL
    matmul_mkl(heightA, widthA, widthB, A, widthA, B, widthB, R, widthB);
#else
    // use internal algorithm (strassen and/or three-loop)
    matmul_matmul(heightA, widthA, widthB, A, widthA, B, widthB, R, widthB);
#endif
    printf("Multiplication: ");
    bench_end();
    bench_start();
    data_t *max = R;
    /* Each thread scans its rows for a local maximum; the critical section
     * merges the local maxima into the shared `max` pointer. */
#pragma omp parallel for default(none) shared(R, max, heightA, widthB)
    for(size_t i = 0; i < heightA; i++) {
        data_t *start = R + i * widthB;
        data_t *end = start + widthB;
        data_t *local_max = start++;
        for(; start < end; start++) {
            if(*start > *local_max)
                local_max = start;
        }
#pragma omp critical
        {
            if(*local_max > *max )
                max = local_max;
        }
    }
    size_t max_i = (max - R) / widthB;
    size_t max_j = (max - R) % widthB;
    data_t max_value = *max;  /* copy out before releasing R */
    free(R);
    printf("Problem: ");
    bench_end();
    if(out_max_i != NULL)
        *out_max_i = max_i;
    if(out_max_j != NULL)
        *out_max_j = max_j;
    return max_value;
}
/*
* Prints usage information
*/
static void usage() {
    /* Fixed banner; fputs avoids printf format processing. */
    fputs("Usage: ./me matrixA.file matrixB.file out.file\n", stdout);
}
/*
* Entry point
*/
/*
 * Entry point: loads the two input matrices, solves max{A x B}, and writes
 * the result.  Returns 0 on success and non-zero on failure (the previous
 * revision left rc at 0 on every error path, so failures exited
 * successfully).
 */
int main(int argc, char *argv[]) {
    if (argc != 4) {
        usage();
        exit(1);
    }
    matmul_init();
    int rc = 0;
    char *pathA = argv[1];
    char *pathB = argv[2];
    char *pathR = argv[3];
    data_t *A;
    size_t heightA;
    size_t widthA;
    data_t *B;
    size_t heightB;
    size_t widthB;
    bench_start();
    // there is no sense to parallelize disk IO
    A = read_matrix_r(pathA, &heightA, &widthA);
    if (NULL == A) {
        rc = 1;
        goto quit;
    }
    B = read_matrix_r(pathB, &heightB, &widthB);
    if (NULL == B) {
        rc = 1;
        goto quit_A;
    }
    if (widthA != heightB) {
        /* inner dimensions do not match: A*B is undefined */
        fprintf(stderr, "Invalid input!\n");
        rc = 1;
        goto quit_B;
    }
    printf("IO: ");
    bench_end();
    size_t max_i;
    size_t max_j;
    data_t max_value;
    max_value = ws_problem(heightA, widthA, widthB, A, B, &max_i, &max_j);
    save_result(pathR, max_value, max_i, max_j);
quit_B:
    free(B);
quit_A:
    free(A);
quit:
    matmul_fini();
    return rc;
}
|
ten_tusscher_2004_epi_S2_14.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_14.h"
// Fills in the cell-model metadata the caller asks for: the resting
// potential (INITIAL_V) and/or the number of ODE state variables (NEQ).
// The signature comes from the GET_CELL_MODEL_DATA macro in the header.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initializes the NEQ-entry state vector sv with precomputed steady-state
// values (Elnaz's) instead of the textbook defaults kept below for reference.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f; //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions
    // (same ordering as the commented block above: V, M, H, J, Xr1, Xr2,
    //  Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki)
    real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advances all requested cells: for each cell, takes num_steps explicit
// Euler steps of size dt, using stim_currents[i] as that cell's stimulus.
// Cells are independent, so the outer loop is parallelized with OpenMP.
// The signature comes from the SOLVE_MODEL_ODES_CPU macro in the header.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    // sv_id must be private: each thread maps its own index to a cell.
    // NOTE(review): i is int while num_cells_to_solve is presumably an
    // unsigned type from the macro signature — confirm the signed/unsigned
    // comparison and range are intended for very large meshes.
#pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];  // solving only a subset of cells
        else
            sv_id = i;                  // solving the whole mesh in order
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Performs one integration step on the NEQ-entry state vector sv:
// snapshot the current state, evaluate RHS_cpu, and write the updated
// state back (RHS_cpu returns the new state values, not derivatives).
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ], next_state[NEQ];
    for(int k = 0; k < NEQ; k++)
        state[k] = sv[k];
    RHS_cpu(state, next_state, stim_current, dt);
    for(int k = 0; k < NEQ; k++)
        sv[k] = next_state[k];
}
// Right-hand side of the ten Tusscher 2004 epicardial model, fitted with
// Elnaz's parameter set (S2 protocol, individual 14).
//
// Despite the name, rDY_ receives the UPDATED state values (forward-Euler
// for V and the concentrations, Rush-Larsen exponential updates for the
// gates), not time derivatives: rDY_[0] = svolt + dt*(-sItot) below, and
// solve_model_ode_cpu copies rDY_ straight back into sv.
//
// sv layout: [0]=V, [1]=m, [2]=h, [3]=j, [4]=xr1, [5]=xr2, [6]=xs, [7]=s,
// [8]=r, [9]=d, [10]=f, [11]=fCa, [12]=g, [13]=Cai, [14]=CaSR, [15]=Nai,
// [16]=Ki.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
///#ifdef EPI
    real Gks=0.245;
///#endif
///#ifdef ENDO
///    real Gks=0.245;
///#endif
///#ifdef MCELL
///    real Gks=0.062;
///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
//#ifdef EPI
    real Gto=0.294;
//#endif
// #ifdef ENDO
//    real Gto=0.073;
//#endif
//#ifdef MCELL
//    real Gto=0.294;
///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Fitted conductances/rates override the textbook defaults above.
    // Order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca,
    // Vmaxup, GpCa, arel, crel, Vleak.
    real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents: Nernst reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;
    //update concentrations (note: Cai/CaSR/Nai/Ki are updated in place here
    //and the new values are what gets stored into rDY_ at the end)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // Analytic solution of the rapid-buffering quadratic for CaSR.
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    // Analytic solution of the rapid-buffering quadratic for Cai.
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // Transient-outward gate kinetics depend on the cell type macro.
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates (Rush-Larsen: exact exponential relaxation toward X_INF)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g may only relax toward their targets when V <= -37 mV or
    // when they are decreasing (model-specified gating constraint).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
GB_binop__iseq_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_uint8
// A.*B function (eWiseMult): GB_AemultB__iseq_uint8
// A*D function (colscale): GB_AxD__iseq_uint8
// D*A function (rowscale): GB_DxB__iseq_uint8
// C+=B function (dense accum): GB_Cdense_accumB__iseq_uint8
// C+=b function (dense accum): GB_Cdense_accumb__iseq_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_uint8
// C=scalar+B GB_bind1st__iseq_uint8
// C=scalar+B' GB_bind1st_tran__iseq_uint8
// C=A+scalar GB_bind2nd__iseq_uint8
// C=A'+scalar GB_bind2nd_tran__iseq_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT8 || GxB_NO_ISEQ_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense matrices (no mask, no accumulator).
// The elementwise loop itself lives in GB_dense_ewise3_noaccum_template.c,
// which is specialized here via the GB_* macros defined above for the
// ISEQ operator on uint8_t.  Returns GrB_NO_VALUE when this kernel is
// compiled out (GB_DISABLE), telling the caller to use the generic path.
GrB_Info GB_Cdense_ewise3_noaccum__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads          // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the ISEQ operator acts as the
// accumulator.  The slice arrays (kfirst/klast/pstart) partition B's entries
// across ntasks parallel tasks; the actual work is done by the included
// template GB_dense_subassign_23_template.c.
GrB_Info GB_Cdense_accumB__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,   // first vector of each task
    const int64_t *GB_RESTRICT klast_slice,    // last vector of each task
    const int64_t *GB_RESTRICT pstart_slice,   // start index within first vector
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar of type uint8_t, accumulated
// with the ISEQ operator.  The scalar is unpacked from the untyped
// p_bwork pointer and the loop is supplied by the included template.
// FIX: the original had two consecutive "return (GrB_SUCCESS) ;" statements
// in the #else branch; the second was unreachable dead code and is removed.
GrB_Info GB_Cdense_accumb__iseq_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // the scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
// Only C->x is written here; the pattern of C matches A.  The per-task slice
// arrays partition A for parallel execution, and the loop comes from
// GB_AxB_colscale_meta.c specialized for the ISEQ uint8 operator.
GrB_Info GB_AxD__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,   // if true, A's values are ignored
    const GrB_Matrix D, bool D_is_pattern,   // if true, D's values are ignored
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
// The pattern of C matches B; only C->x is computed here, by the included
// GB_AxB_rowscale_meta.c template specialized for ISEQ on uint8_t.
GrB_Info GB_DxB__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,   // if true, D's values are ignored
    const GrB_Matrix B, bool B_is_pattern,   // if true, B's values are ignored
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is the ISEQ uint8 operator.
// The result pattern is the union of A and B; entries present in only one
// input are copied through unchanged.  The C_to_* arrays map C's vectors back
// to the corresponding vectors of M, A, and B, and TaskList describes the
// parallel decomposition.  Work is done by GB_add_template.c.
GrB_Info GB_AaddB__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,            // true if C's hyperlist equals M's
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where ".*" is the ISEQ uint8 operator.
// The result pattern is the intersection of A and B.  Same task/mapping
// conventions as GB_AaddB above; work is done by GB_emult_template.c.
GrB_Info GB_AemultB__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for all p: apply the ISEQ operator with the scalar
// bound to the first argument.  Cx and Bx may alias; that is safe because
// each iteration reads Bx [p] before writing Cx [p].
GrB_Info GB_bind1st__iseq_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the bound scalar x
    const GB_void *Bx_input,    // the array B
    int64_t anz,                // number of entries
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    const uint8_t *Bx = (const uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cij = (x == bij)
        Cx [p] = (x == Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for all p: apply the ISEQ operator with the scalar
// bound to the second argument.  Cx and Ax may alias; each iteration reads
// Ax [p] before writing Cx [p], so aliasing is safe.
GrB_Info GB_bind2nd__iseq_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // the array A
    const GB_void *y_input,     // the bound scalar y
    int64_t anz,                // number of entries
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    const uint8_t *Ax = (const uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cij = (aij == y)
        Cx [p] = (Ax [p] == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x == aij), with the scalar x
// bound to the first argument.  GB_CAST_OP (redefined just above) supplies
// the per-entry operation to GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__iseq_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,     // the bound scalar x
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this translation unit (no code is
    // generated here; this only affects the preprocessor state)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij == y), with the scalar y
// bound to the second argument.  GB_CAST_OP (redefined just above) supplies
// the per-entry operation to GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__iseq_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // the bound scalar y
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_fp32
// op(A') function: GB_tran__lnot_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(Ax [p] != 0) for all p: apply the logical-NOT operator to a
// float array, producing 1.0f for zero inputs and 0.0f otherwise.  This is
// the GB_CAST_OP macro from above written out inline: the (float) cast of a
// float aij is the identity, so only the logical negation remains.
GrB_Info GB_unop__lnot_fp32_fp32
(
    float *restrict Cx,
    const float *restrict Ax,
    int64_t anz,                // number of entries
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        // cij = !(aij != 0)
        Cx [p] = !(aij != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, then apply the LNOT operator entrywise.
// The transpose machinery and per-entry GB_CAST_OP application come from
// the included GB_unaryop_transpose.c template.
GrB_Info GB_tran__lnot_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_orphan.c | /******************************************************************************
* FILE: omp_orphan.c
* DESCRIPTION:
* OpenMP Example - Parallel region with an orphaned directive - C/C++ Version
* This example demonstrates a dot product being performed by an orphaned
* loop reduction construct. Scoping of the reduction variable is critical.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 06/30/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100
float a[VECLEN], b[VECLEN], sum;
// Compute the dot product of the global vectors a and b into the global
// `sum`, using an orphaned OpenMP worksharing loop: the `omp for` here binds
// to whatever parallel region encloses the call (see main).  The reduction
// clause gives each thread a private partial sum that is combined on exit.
// FIX: the function is declared to return float but had no return statement,
// which is undefined behavior if a caller ever uses the result; it now
// returns the accumulated sum.
float dotprod ()
{
    int i, tid;

    tid = omp_get_thread_num();
    #pragma omp for reduction(+:sum)
    for (i=0; i < VECLEN; i++)
    {
        sum = sum + (a[i]*b[i]);
        printf(" tid= %d i=%d\n",tid,i);
    }
    return sum;
}
// Driver: fill the global vectors, zero the global sum, then open a parallel
// region in which every thread calls dotprod().  The orphaned `omp for`
// inside dotprod shares the loop iterations among those threads.
int main (int argc, char *argv[])
{
    // a[i] = b[i] = i, so the expected dot product is sum of i*i.
    for (int i = 0; i < VECLEN; i++)
    {
        a[i] = b[i] = 1.0 * i;
    }
    sum = 0.0;

    #pragma omp parallel
    dotprod();

    printf("Sum = %f\n",sum);
    return 0;
}
|
reg_detect.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is int, default size is 50. */
#include "reg_detect.h"
/* Array initialization. */
/* Array initialization: fill sum_tang, mean, and path with deterministic
   values derived from the indices, so runs are reproducible.  The annotate
   attributes carry value-range hints for the TAFFO precision-tuning tool. */
static
void init_array(int maxgrid,
DATA_TYPE POLYBENCH_2D(sum_tang,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(mean,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid))
{
int i __attribute__((annotate("scalar(range(0, " PB_XSTR(MAXGRID) ") final)")));
int j __attribute__((annotate("scalar(range(0, " PB_XSTR(MAXGRID) ") final)")));
for (i = 0; i < maxgrid; i++)
for (j = 0; j < maxgrid; j++) {
/* sum_tang grows with both indices; mean and path use integer-to-DATA_TYPE
   division by maxgrid, so their magnitudes stay below maxgrid. */
sum_tang[i][j] = (DATA_TYPE)((i+1)*(j+1));
mean[i][j] = ((DATA_TYPE) i-j) / maxgrid;
path[i][j] = ((DATA_TYPE) i*(j-1)) / maxgrid;
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Print the live-out array `path` to stderr, 20 values per line.  Polybench
   uses this both to defeat dead-code elimination and to check output. */
static
void print_array(int maxgrid,
DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid))
{
int i, j;
for (i = 0; i < maxgrid; i++)
for (j = 0; j < maxgrid; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, path[i][j]);
/* newline every 20 elements, counted over the flattened array */
if ((i * maxgrid + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Source (modified): http://www.cs.uic.edu/~iluican/reg_detect.c */
/* Regularity-detection kernel (Polybench reg_detect), repeated niter times.
   Each outer iteration: (1) broadcast sum_tang into the length dimension of
   diff, (2) prefix-sum diff along cnt into sum_diff and take the last prefix
   as mean, (3) seed path's first row from mean, (4) accumulate path along
   the diagonal: path[j][i] = path[j-1][i-1] + mean[j][i].
   NOTE(review): the structure here is unusual — an outer `omp parallel`
   whose work is confined to `omp master`, with *nested* `omp parallel for`
   regions inside.  Unless nested parallelism is enabled, the inner regions
   run on one-thread teams; confirm this is the intended parallelization.
   Step (4)'s collapse(2) also parallelizes the j loop even though
   path[j][i] reads path[j-1][i-1] — the static schedule over the collapsed
   space may order these correctly in practice, but it warrants review. */
static
void kernel_reg_detect(int niter, int maxgrid, int length,
DATA_TYPE POLYBENCH_2D(sum_tang,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(mean,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_3D(diff,MAXGRID,MAXGRID,LENGTH,maxgrid,maxgrid,length),
DATA_TYPE POLYBENCH_3D(sum_diff,MAXGRID,MAXGRID,LENGTH,maxgrid,maxgrid,length))
{
int t, i, j, cnt;
#pragma scop
#pragma omp parallel
{
#pragma omp master
{
/* the time loop is sequential; each pass refines path */
for (t = 0; t < _PB_NITER; t++)
{
/* (1) diff[j][i][cnt] = sum_tang[j][i] for the upper triangle i >= j */
#pragma omp parallel for private (i, cnt) collapse(2) schedule(static)
for (j = 0; j <= _PB_MAXGRID - 1; j++)
for (i = j; i <= _PB_MAXGRID - 1; i++)
for (cnt = 0; cnt <= _PB_LENGTH - 1; cnt++)
diff[j][i][cnt] = sum_tang[j][i];
/* (2) running sums along cnt; mean takes the full-length sum */
#pragma omp parallel for private (i, cnt) collapse(2) schedule(static)
for (j = 0; j <= _PB_MAXGRID - 1; j++)
{
for (i = j; i <= _PB_MAXGRID - 1; i++)
{
sum_diff[j][i][0] = diff[j][i][0];
for (cnt = 1; cnt <= _PB_LENGTH - 1; cnt++)
sum_diff[j][i][cnt] = sum_diff[j][i][cnt - 1] + diff[j][i][cnt];
mean[j][i] = sum_diff[j][i][_PB_LENGTH - 1];
}
}
/* (3) first row of path is just mean's first row */
#pragma omp parallel for
for (i = 0; i <= _PB_MAXGRID - 1; i++)
path[0][i] = mean[0][i];
/* (4) diagonal accumulation; row j depends on row j-1 */
#pragma omp parallel for private (i) collapse(2) schedule(static)
for (j = 1; j <= _PB_MAXGRID - 1; j++)
for (i = j; i <= _PB_MAXGRID - 1; i++)
path[j][i] = path[j - 1][i - 1] + mean[j][i];
}
}
}
#pragma endscop
}
/* Benchmark driver: allocate the Polybench arrays (with TAFFO range
   annotations), initialize inputs, time the kernel, print instruments,
   dump the live-out array to defeat DCE, and free everything. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int niter = NITER;
int maxgrid = MAXGRID;
int length = LENGTH;
/* Variable declaration/allocation.  The annotate attributes attach
   value-range metadata to each array's element type for precision tuning. */
POLYBENCH_2D_ARRAY_DECL(sum_tang, DATA_TYPE __attribute__((annotate("target('sum_tang') scalar()"))), MAXGRID, MAXGRID, maxgrid, maxgrid);
POLYBENCH_2D_ARRAY_DECL(mean, DATA_TYPE __attribute__((annotate("target('mean') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, maxgrid, maxgrid);
POLYBENCH_2D_ARRAY_DECL(path, DATA_TYPE __attribute__((annotate("target('path') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, maxgrid, maxgrid);
POLYBENCH_3D_ARRAY_DECL(diff, DATA_TYPE __attribute__((annotate("target('diff') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, LENGTH, maxgrid, maxgrid, length);
POLYBENCH_3D_ARRAY_DECL(sum_diff, DATA_TYPE __attribute__((annotate("target('sum_diff') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, LENGTH, maxgrid, maxgrid, length);
/* Initialize array(s). */
init_array (maxgrid,
POLYBENCH_ARRAY(sum_tang),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(path));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_reg_detect (niter, maxgrid, length,
POLYBENCH_ARRAY(sum_tang),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(path),
POLYBENCH_ARRAY(diff),
POLYBENCH_ARRAY(sum_diff));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(maxgrid, POLYBENCH_ARRAY(path)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(sum_tang);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(path);
POLYBENCH_FREE_ARRAY(diff);
POLYBENCH_FREE_ARRAY(sum_diff);
return 0;
}
|
billownoise.h | #pragma once
#ifndef BILLOW_NOISE_H
#define BILLOW_NOISE_H
#include "noisecommon.h"
#define DEFAULT_BILLOW_FREQUENCY 1.0
#define DEFAULT_BILLOW_LACUNARITY 2.0
#define DEFAULT_BILLOW_PERSISTENCE 0.5
#define DEFAULT_BILLOW_OCTAVE_COUNT 6
#define DEFAULT_BILLOW_SEED 0
#define DEFAULT_BILLOW_POSITION_X 0.0
#define DEFAULT_BILLOW_POSITION_Y 0.0
#define DEFAULT_BILLOW_POSITION_Z 0.0
#define DEFAULT_BILLOW_STEP 0.01
#define DEFAULT_BILLOW_PARALLEL false
#define DEFAULT_BILLOW_QUALITY QUALITY_STANDARD
// Configuration and dispatch state for billow (rectified fractal) noise.
// billow_noise_init fills in defaults and selects billow_func based on the
// SIMD support detected at runtime.
struct BillowNoise {
  float frequency;       // base frequency of the first octave
  float lacunarity;      // per-octave frequency multiplier
  float persistence;     // per-octave amplitude multiplier
  unsigned char octave_count;  // number of octaves summed
  int seed;              // base seed; octave index is added per octave
  float position[3];     // sampling origin (x, y, z)
  float step;            // world-space distance between adjacent samples
  bool parallel;         // enable the OpenMP `if` clause in the eval loops
  // SIMD-dispatched evaluator: returns a newly allocated x*y*z float grid
  float *(*billow_func)(struct BillowNoise *, size_t, size_t, size_t);
  enum NoiseQuality noise_quality;  // interpolation quality selector
};
static inline float *billow_noise_eval_1d(struct BillowNoise *billow_noise, size_t x_size);
static inline float *billow_noise_eval_2d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size);
static inline float *billow_noise_eval_3d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float billow_noise_eval_3d_single(struct BillowNoise *billow_noise, float x_pos, float y_pos, float z_pos);
static inline float *billow_noise_eval_3d_fallback(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_sse2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_sse4_1(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx512(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline void billow_noise_init(struct BillowNoise *billow_noise) {
billow_noise->frequency = DEFAULT_BILLOW_FREQUENCY;
billow_noise->lacunarity = DEFAULT_BILLOW_LACUNARITY;
billow_noise->persistence = DEFAULT_BILLOW_PERSISTENCE;
billow_noise->octave_count = DEFAULT_BILLOW_OCTAVE_COUNT;
billow_noise->seed = DEFAULT_BILLOW_SEED;
billow_noise->noise_quality = DEFAULT_BILLOW_QUALITY;
billow_noise->position[0] = DEFAULT_BILLOW_POSITION_X;
billow_noise->position[1] = DEFAULT_BILLOW_POSITION_Y;
billow_noise->position[2] = DEFAULT_BILLOW_POSITION_X;
billow_noise->step = DEFAULT_BILLOW_STEP;
billow_noise->parallel = DEFAULT_BILLOW_PARALLEL;
switch (detect_simd_support()) {
#ifdef ARCH_32_64
case NOISE_SIMD_AVX512F:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
case NOISE_SIMD_AVX2:
billow_noise->billow_func = &billow_noise_eval_3d_avx2;
break;
case NOISE_SIMD_AVX:
billow_noise->billow_func = &billow_noise_eval_3d_avx;
break;
case NOISE_SIMD_SSE4_1:
billow_noise->billow_func = &billow_noise_eval_3d_sse4_1;
break;
case NOISE_SIMD_SSE2:
billow_noise->billow_func = &billow_noise_eval_3d_sse2;
break;
#else
case SIMD_NEON:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
#endif
default:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
}
}
// Evaluate a 1-D strip of billow noise (y and z dimensions fixed at 1).
// Returns a newly allocated array of x_size floats; caller frees it.
static inline float *billow_noise_eval_1d(struct BillowNoise *billow_noise, size_t x_size) {
  return billow_noise->billow_func(billow_noise, x_size, 1, 1);
}
// Evaluate a 2-D slab of billow noise (z dimension fixed at 1).
// Returns a newly allocated array of x_size*y_size floats; caller frees it.
static inline float *billow_noise_eval_2d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size) {
  return billow_noise->billow_func(billow_noise, x_size, y_size, 1);
}
// Evaluate a full 3-D volume of billow noise via the SIMD-dispatched
// evaluator chosen at init time.  Returns a newly allocated array of
// x_size*y_size*z_size floats (x fastest-varying); caller frees it.
static inline float *billow_noise_eval_3d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
  return billow_noise->billow_func(billow_noise, x_size, y_size, z_size);
}
// Evaluate billow noise at a single grid position (x_pos, y_pos, z_pos).
// The grid position is offset by the configured origin, scaled by the step
// size and then by the base frequency; octave_count octaves of rectified
// coherent noise are summed with per-octave lacunarity/persistence scaling,
// and the result is biased upward by 0.5.
static inline float billow_noise_eval_3d_single(struct BillowNoise *billow_noise, float x_pos, float y_pos, float z_pos) {
  float x = (billow_noise->position[0] + (x_pos * billow_noise->step)) * billow_noise->frequency;
  float y = (billow_noise->position[1] + (y_pos * billow_noise->step)) * billow_noise->frequency;
  float z = (billow_noise->position[2] + (z_pos * billow_noise->step)) * billow_noise->frequency;
  float accum = 0.0;
  float amplitude = 1.0;
  for (int octave = 0; octave < billow_noise->octave_count; octave++) {
    // clamp coordinates into the range the integer lattice can represent
    float nx = make_int_32_range(x);
    float ny = make_int_32_range(y);
    float nz = make_int_32_range(z);
    int octave_seed = (billow_noise->seed + octave) & 0xffffffff;
    float signal = gradient_coherent_noise_3d(nx, ny, nz, octave_seed, billow_noise->noise_quality);
    // rectify: fold the signal about zero, then remap into [-1, 1]
    signal = 2.0 * fabs(signal) - 1.0;
    accum += signal * amplitude;
    // next octave: higher frequency, lower amplitude
    x *= billow_noise->lacunarity;
    y *= billow_noise->lacunarity;
    z *= billow_noise->lacunarity;
    amplitude *= billow_noise->persistence;
  }
  accum += 0.5;
  return accum;
}
// Scalar (non-SIMD) evaluation of a 3-D billow-noise volume.  Returns a newly
// allocated x_size*y_size*z_size float grid (x fastest-varying); caller frees.
// The OpenMP collapse(3) loop is only parallel when billow_noise->parallel.
// FIX 1: the sample coordinate was computed as (position * frequency) +
// (dim * step), which disagrees with billow_noise_eval_3d_single and every
// SIMD path, all of which compute (position + dim * step) * frequency.  The
// fallback now matches them, so scalar and SIMD outputs agree for any
// frequency != 1.
// FIX 2: the allocation result is now checked; NULL is returned on failure
// instead of dereferencing a null pointer in the loop.
static inline float *billow_noise_eval_3d_fallback(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
#ifdef CUSTOM_ALLOCATOR
  float *noise_set = malloc(sizeof(float) * x_size * y_size * z_size);
#else
  float *noise_set = noise_allocate(sizeof(float), sizeof(float) * x_size * y_size * z_size);
#endif
  if (noise_set == NULL)
    return NULL;
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim++) {
        // (origin + grid offset) scaled by frequency — same formula as the
        // single-sample and SIMD evaluators.
        float x = (billow_noise->position[0] + (x_dim * billow_noise->step)) * billow_noise->frequency;
        float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
        float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
        float value = 0.0;
        float cur_persistence = 1.0;
        for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
          float nx = make_int_32_range(x);
          float ny = make_int_32_range(y);
          float nz = make_int_32_range(z);
          int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
          float signal = gradient_coherent_noise_3d(nx, ny, nz, cur_seed, billow_noise->noise_quality);
          // rectify the signal and remap into [-1, 1] ("billow" shape)
          signal = 2.0 * fabs(signal) - 1.0;
          value += signal * cur_persistence;
          x *= billow_noise->lacunarity;
          y *= billow_noise->lacunarity;
          z *= billow_noise->lacunarity;
          cur_persistence *= billow_noise->persistence;
        }
        value += 0.5;
        *(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size)))) = value;
      }
    }
  }
  return noise_set;
}
#ifdef ARCH_32_64
#ifdef SIMD_SSE2
// SSE2 evaluation of a 3-D billow-noise volume: four x-adjacent samples are
// computed per iteration in one __m128 lane set; y and z stay scalar.
// NOTE(review): the inner loop steps x_dim by 4 and _mm_store_ps requires
// 16-byte alignment — this assumes x_size is a multiple of 4 and that
// noise_allocate returns 16-byte-aligned memory; confirm both invariants.
static inline float *billow_noise_eval_3d_sse2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
  float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
        // x lane vector: (position + (x_dim+k)*step) * frequency, k = 0..3
        __m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(billow_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(billow_noise->step))), _mm_set1_ps(billow_noise->frequency));
        float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
        float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
        __m128 value = _mm_set1_ps(0.0);
        float cur_persistence = 1.0;
        for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
          __m128 nx = make_int_32_range_sse2(x_vec);
          float ny = make_int_32_range(y);
          float nz = make_int_32_range(z);
          int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
          __m128 signal = gradient_coherent_noise_3d_sse2(nx, ny, nz, cur_seed, billow_noise->noise_quality);
          // andnot with -0.0 clears the sign bit (vector fabs);
          // then remap: signal = 2*|signal| - 1
          signal = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(2.0), _mm_andnot_ps(_mm_set1_ps(-0.0), signal)), _mm_set1_ps(1.0));
          value = _mm_add_ps(value, _mm_mul_ps(signal, _mm_set1_ps(cur_persistence)));
          x_vec = _mm_mul_ps(x_vec, _mm_set1_ps(billow_noise->lacunarity));
          y *= billow_noise->lacunarity;
          z *= billow_noise->lacunarity;
          cur_persistence *= billow_noise->persistence;
        }
        value = _mm_add_ps(value, _mm_set1_ps(0.5));
        _mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
      }
    }
  }
  return noise_set;
}
#endif
#ifdef SIMD_SSE41
// SSE4.1 evaluation of a 3-D billow-noise volume.  Identical structure to the
// SSE2 path; only the coherent-noise kernel differs
// (gradient_coherent_noise_3d_sse4_1).  Same alignment / x_size%4==0
// assumptions as the SSE2 variant.
static inline float *billow_noise_eval_3d_sse4_1(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
  float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
        // x lane vector: (position + (x_dim+k)*step) * frequency, k = 0..3
        __m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(billow_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(billow_noise->step))), _mm_set1_ps(billow_noise->frequency));
        float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
        float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
        __m128 value = _mm_set1_ps(0.0);
        float cur_persistence = 1.0;
        for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
          __m128 nx = make_int_32_range_sse2(x_vec);
          float ny = make_int_32_range(y);
          float nz = make_int_32_range(z);
          int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
          __m128 signal = gradient_coherent_noise_3d_sse4_1(nx, ny, nz, cur_seed, billow_noise->noise_quality);
          // vector fabs via andnot(-0.0), then remap to 2*|s| - 1
          signal = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(2.0), _mm_andnot_ps(_mm_set1_ps(-0.0), signal)), _mm_set1_ps(1.0));
          value = _mm_add_ps(value, _mm_mul_ps(signal, _mm_set1_ps(cur_persistence)));
          x_vec = _mm_mul_ps(x_vec, _mm_set1_ps(billow_noise->lacunarity));
          y *= billow_noise->lacunarity;
          z *= billow_noise->lacunarity;
          cur_persistence *= billow_noise->persistence;
        }
        value = _mm_add_ps(value, _mm_set1_ps(0.5));
        _mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
      }
    }
  }
  return noise_set;
}
#endif
#ifdef SIMD_AVX
// AVX evaluation of a 3-D billow-noise volume: eight x-adjacent samples per
// __m256 iteration.  Same structure as the SSE paths.
// NOTE(review): x_dim advances by 8 and _mm256_store_ps requires 32-byte
// alignment — assumes x_size is a multiple of 8 and noise_allocate returns
// 32-byte-aligned memory; confirm both invariants.
static inline float *billow_noise_eval_3d_avx(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
  float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
        // x lane vector: (position + (x_dim+k)*step) * frequency, k = 0..7
        __m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(billow_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(billow_noise->step))), _mm256_set1_ps(billow_noise->frequency));
        float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
        float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
        __m256 value = _mm256_set1_ps(0.0);
        float cur_persistence = 1.0;
        for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
          __m256 nx = make_int_32_range_avx(x_vec);
          float ny = make_int_32_range(y);
          float nz = make_int_32_range(z);
          int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
          __m256 signal = gradient_coherent_noise_3d_avx(nx, ny, nz, cur_seed, billow_noise->noise_quality);
          // vector fabs via andnot(-0.0), then remap to 2*|s| - 1
          signal = _mm256_sub_ps(_mm256_mul_ps(_mm256_set1_ps(2.0), _mm256_andnot_ps(_mm256_set1_ps(-0.0), signal)), _mm256_set1_ps(1.0));
          value = _mm256_add_ps(value, _mm256_mul_ps(signal, _mm256_set1_ps(cur_persistence)));
          x_vec = _mm256_mul_ps(x_vec, _mm256_set1_ps(billow_noise->lacunarity));
          y *= billow_noise->lacunarity;
          z *= billow_noise->lacunarity;
          cur_persistence *= billow_noise->persistence;
        }
        value = _mm256_add_ps(value, _mm256_set1_ps(0.5));
        _mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
      }
    }
  }
  return noise_set;
}
#endif
#ifdef SIMD_AVX2
/* Evaluate 3D billow noise over an x_size * y_size * z_size grid using the
 * AVX2 code path.  Identical structure to the AVX variant above except that
 * the inner gradient noise call dispatches to the AVX2 kernel.  Returns a
 * freshly allocated array indexed as [x + y*x_size + z*x_size*y_size]; the
 * caller owns the buffer.
 * NOTE(review): assumes x_size is a multiple of 8 (or that noise_allocate
 * pads the buffer) so the final 8-wide store stays in bounds -- TODO confirm. */
static inline float *billow_noise_eval_3d_avx2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
/* sizeof(__m256) alignment presumably satisfies _mm256_store_ps's 32-byte
 * requirement -- verify noise_allocate's contract */
float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
for (int z_dim = 0; z_dim < z_size; z_dim++) {
for (int y_dim = 0; y_dim < y_size; y_dim++) {
for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
/* world-space x coordinates for 8 consecutive cells (lane 0 = x_dim) */
__m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(billow_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(billow_noise->step))), _mm256_set1_ps(billow_noise->frequency));
float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
__m256 value = _mm256_set1_ps(0.0);
float cur_persistence = 1.0;  /* octave amplitude, decays each octave */
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
__m256 nx = make_int_32_range_avx(x_vec);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
/* per-octave seed, masked to 32 bits */
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
__m256 signal = gradient_coherent_noise_3d_avx2(nx, ny, nz, cur_seed, billow_noise->noise_quality);
/* billow fold: 2*fabs(signal) - 1; andnot(-0.0, x) clears the sign bit */
signal = _mm256_sub_ps(_mm256_mul_ps(_mm256_set1_ps(2.0), _mm256_andnot_ps(_mm256_set1_ps(-0.0), signal)), _mm256_set1_ps(1.0));
value = _mm256_add_ps(value, _mm256_mul_ps(signal, _mm256_set1_ps(cur_persistence)));
/* advance to the next octave: higher frequency, lower amplitude */
x_vec = _mm256_mul_ps(x_vec, _mm256_set1_ps(billow_noise->lacunarity));
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
/* recenter the folded sum */
value = _mm256_add_ps(value, _mm256_set1_ps(0.5));
_mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
}
}
}
return noise_set;
}
#endif
#endif
#endif // BILLOW_NOISE_H
|
GB_binop__second_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__second_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__second_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fc32)
// A*D function (colscale): GB (_AxD__second_fc32)
// D*A function (rowscale): GB (_DxB__second_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fc32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 1
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC32 || GxB_NO_SECOND_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.  With the
// SECOND operator (z = y) each entry becomes Cx [p] = Bx [p]; A contributes
// only its (dense) pattern.  The loop lives in the included template and is
// driven by the GB_* macros defined above.  (Auto-generated kernel.)
void GB (_Cdense_ewise3_noaccum__second_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, sliced for
// parallelism by B_ek_slicing over B_ntasks tasks / B_nthreads threads.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), signalling the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__second_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b (passed type-erased via p_bwork) into
// every entry of the dense matrix C.  Returns GrB_NO_VALUE when disabled so
// the generic kernel is used instead.
GrB_Info GB (_Cdense_accumb__second_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;  // unreachable (generated-code artifact); harmless
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// result into C->x.  Work is sliced over A by A_ek_slicing.  Returns
// GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_AxD__second_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the result
// into C->x.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_DxB__second_fc32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B (set union of patterns),
// optionally masked (M, Mask_struct, Mask_comp).  When is_eWiseUnion is true,
// alpha/beta scalars substitute for entries missing from A or B respectively.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_AaddB__second_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings, freed by GB_FREE_WORKSPACE after the template runs
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the type-erased union scalars only when they will be used
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (set intersection of patterns), optionally
// masked, where C is sparse or hypersparse.  All work is performed by the
// included meta template, parameterized by the GB_* macros above.
GrB_Info GB (_AemultB_08__second_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  flipxy requests the flipped operator; since GB_BINOP_FLIP is
// 0 for SECOND (handled upstream), only the unflipped template is compiled.
GrB_Info GB (_AemultB_02__second_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full.  Parallelized over M via M_ek_slicing.
GrB_Info GB (_AemultB_04__second_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_AemultB_bitmap__second_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
vseprefine.c | /**
* @file vseprefine.c
* @brief Vertex separator refinement routines
* @author Dominique LaSalle <mtmetis@domnet.org>
* Copyright 2014, Regents of the University of Minnesota
* @version 1
* @date 2014-10-13
*/
#ifndef MTMETIS_REFINE_C
#define MTMETIS_REFINE_C
#include "vseprefine.h"
#include "check.h"
#define ONE_SIDED 1
#include <metis.h>
/******************************************************************************
* TYPES ***********************************************************************
******************************************************************************/
/* Per-vertex lock states used during FM refinement.  The locked values
 * deliberately reuse the MTMETIS_VSEP_* partition ids so a lock can be
 * compared directly against a destination side (see S_valid_move). */
typedef enum lock_state_type {
UNLOCKED = -1,
PARTA_LOCKED = MTMETIS_VSEP_PARTA,
PARTB_LOCKED = MTMETIS_VSEP_PARTB,
SEP_LOCKED = MTMETIS_VSEP_SEP,
BOUNDARY_LOCKED = MTMETIS_VSEP_SEP+1
} lock_state_type;
/* Inter-thread message announcing that local vertex v on the receiving
 * thread was affected by a remote move and must have its state updated. */
typedef struct update_type {
vtx_type v;
} update_type;
/******************************************************************************
* DOMLIB IMPORTS **************************************************************
******************************************************************************/
#define DLPQ_PREFIX vw
#define DLPQ_KEY_T wgt_type
#define DLPQ_VAL_T vtx_type
#define DLPQ_STATIC
#include "dlpq_headers.h"
#undef DLPQ_STATIC
#undef DLPQ_VAL_T
#undef DLPQ_KEY_T
#undef DLPQ_PREFIX
#define DLBUFFER_PREFIX update
#define DLBUFFER_TYPE_T update_type
#define DLBUFFER_STATIC 1
#include "dlbuffer_headers.h"
#undef DLBUFFER_STATIC
#undef DLBUFFER_TYPE_T
#undef DLBUFFER_PREFIX
#define DLCB_PREFIX update
#define DLCB_TYPE_T update_type
#define DLCB_BUFFER_PREFIX update_buffer
#define DLCB_BUFFER_TYPE_T update_buffer_t
#define DLCB_STATIC 1
#include "dlcb_headers.h"
#undef DLCB_STATIC
#undef DLCB_BUFFER_TYPE_T
#undef DLCB_BUFFER_PREFIX
#undef DLCB_TYPE_T
#undef DLCB_PREFIX
/******************************************************************************
* CONSTANTS *******************************************************************
******************************************************************************/
static size_t const SERIAL_FM_FACTOR = 8192;
/******************************************************************************
* PRIVATE FUNCTIONS ***********************************************************
******************************************************************************/
/**
 * @brief Determine whether vertex v is permitted to move to the given side.
 *
 * @param v The vertex in question.
 * @param side The destination partition (MTMETIS_VSEP_*).
 * @param locked The per-vertex lock state array (lock_state_type values).
 *
 * @return Non-zero if the vertex is unlocked or already locked to 'side'.
 */
static inline int S_valid_move(
    vtx_type const v,
    pid_type const side,
    int const * const locked)
{
  int const state = locked[v];

  /* movable when unlocked, or when the lock matches the requested side */
  return (state == UNLOCKED || state == (int)(side));
}
/**
 * @brief Pin vertex v to the given side so later passes will not move it
 * elsewhere (see S_valid_move).
 *
 * @param v The vertex to lock.
 * @param locked The per-vertex lock state array to update.
 * @param side The side to lock the vertex to.
 */
static inline void S_lock(
    vtx_type const v,
    int * const locked,
    pid_type const side)
{
  /* record the destination side as this vertex's lock state */
  locked[v] = (int)side;
}
/**
 * @brief Synchronize partition weights across threads: each thread converts
 * its local pwgts into deltas relative to the shared global pwgts, the deltas
 * are sum-reduced over all threads, and the agreed-upon totals are written
 * back to both the local and (by thread 0) the global arrays.  Collective:
 * every thread in 'comm' must call this.
 */
static inline void S_sync_pwgts(
tid_type const myid,
wgt_type * const gpwgts,
wgt_type * const lpwgts,
dlthread_comm_t const comm)
{
/* turn local pwgts into deltas */
lpwgts[0] -= gpwgts[0];
lpwgts[1] -= gpwgts[1];
lpwgts[MTMETIS_VSEP_SEP] -= gpwgts[MTMETIS_VSEP_SEP];
/* create global deltas */
wgt_dlthread_sumareduce(lpwgts,3,comm);
/* set local pwgts to be global pwgts */
lpwgts[0] += gpwgts[0];
lpwgts[1] += gpwgts[1];
lpwgts[MTMETIS_VSEP_SEP] += gpwgts[MTMETIS_VSEP_SEP];
/* barrier: no thread may overwrite gpwgts until everyone has finished
 * reading the old values above */
dlthread_barrier(comm);
if (myid == 0) {
/* re-sync global pwgts */
gpwgts[0] = lpwgts[0];
gpwgts[1] = lpwgts[1];
gpwgts[MTMETIS_VSEP_SEP] = lpwgts[MTMETIS_VSEP_SEP];
}
/* second barrier: ensure the new gpwgts are visible before anyone reads */
dlthread_barrier(comm);
}
/**
 * @brief Pull a neighbor vertex v (owned by thread myid) into the separator
 * after a vertex adjacent to it moved to 'side'.  Only acts if v currently
 * resides in the opposite partition ('other'); otherwise this is a no-op.
 * Recomputes v's connectivity, decrements the con[other] of its separator
 * neighbors (they lose v as an 'other'-side neighbor), moves v into the
 * separator/boundary, optionally enqueues affected vertices into the
 * priority queue q (pass q == NULL when applying remote updates), and
 * adjusts the partition weights.  Statement order is load-bearing: neighbor
 * connectivity must be updated while gwhere[myid][v] still reads 'other'.
 */
static inline void S_update_neighbor(
pid_type const side,
vtx_type const v,
tid_type const myid,
vw_pq_t * const q,
vtx_iset_t * const bnd,
vsnbrinfo_type * const * const gnbrinfo,
wgt_type * const pwgts,
graph_type const * const graph)
{
vtx_type k, lvtx;
adj_type j;
tid_type nbrid;
wgt_type gain;
vtx_type const mynvtxs = graph->mynvtxs[myid];
adj_type const * const xadj = graph->xadj[myid];
vtx_type const * const adjncy = graph->adjncy[myid];
wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
pid_type * const * const gwhere = graph->where;
pid_type const other = side ^ 0x01;
/* if I own the neighboring vertex, perform the update myself */
if (gwhere[myid][v] == other) {
/* re-calculate my nrbinfo */
S_calc_conn(v,myid,mynvtxs,xadj,adjncy,gvwgt,(pid_type const **)gwhere, \
graph->dist,gnbrinfo[myid][v].con);
/* update neighbors of the vertices pulled into the separator */
for (j=xadj[v];j<xadj[v+1];++j) {
k = adjncy[j];
/* ids >= mynvtxs are global ids for vertices owned by other threads */
if (k < mynvtxs) {
lvtx = k;
nbrid = myid;
} else {
lvtx = gvtx_to_lvtx(k,graph->dist);
nbrid = gvtx_to_tid(k,graph->dist);
}
if (gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
/* update connectivity */
#pragma omp atomic
gnbrinfo[nbrid][lvtx].con[other] -= gvwgt[myid][v];
}
if (q && nbrid == myid && gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
/* this vertex is in the separator, and is now more likely
* to move. */
gain = gvwgt[nbrid][lvtx] - gnbrinfo[nbrid][lvtx].con[other];
if (vw_pq_contains(lvtx,q)) {
vw_pq_update(gain,lvtx,q);
} else {
vw_pq_push(gain,lvtx,q);
}
}
}
/* pull this vertex into the separator after updating neighbors */
gwhere[myid][v] = MTMETIS_VSEP_SEP;
/* actually move the vertex */
vtx_iset_add(v,bnd);
/* add the vertex to the priority queue for further movement */
if (q) {
gain = gvwgt[myid][v] - gnbrinfo[myid][v].con[other];
vw_pq_push(gain,v,q);
}
/* update the partition weights */
pwgts[other] -= gvwgt[myid][v];
pwgts[MTMETIS_VSEP_SEP] += gvwgt[myid][v];
}
}
/**
 * @brief Choose which partition to move the next separator vertex to, based
 * on the tops of the two per-side priority queues (thread-local variant:
 * vertex ids in the queues are local, and per-vertex data is indexed
 * directly).  Prefers the higher-gain move, breaking ties by the lighter
 * resulting side, then by alternating; flips sides when the chosen move
 * would exceed maxpwgt.  Returns NULL_PID when no valid move exists.
 * NOTE(review): the 'myid' parameter appears unused here -- presumably kept
 * for signature symmetry with the global variant; confirm before removing.
 */
static inline pid_type S_pick_side_local(
tid_type const myid,
wgt_type const * const vwgt,
wgt_type const * const pwgts,
wgt_type const maxpwgt,
vsnbrinfo_type const * const nbrinfo,
vw_pq_t * const * const q)
{
vtx_type p, v;
pid_type side;
/* part next move properties */
vtx_type vtx[2];
wgt_type wgt[2], pri[2];
/* determine stats for each side */
for (p=0;p<MTMETIS_VSEP_SEP;++p) {
if (q[p]->size > 0) {
v = vtx[p] = vw_pq_peek(q[p]);
/* gain of moving v to side p: its weight minus its connectivity to
 * the opposite side (which would be pulled into the separator) */
pri[p] = vwgt[v] - nbrinfo[v].con[p^0x01];
DL_ASSERT(pri[p] == vw_pq_top(q[p]),"Vertex %"PF_VTX_T" has wrong " \
"priority (%"PF_WGT_T" vs %"PF_WGT_T") in queue %"PF_PID_T"\n", \
v,vw_pq_top(q[p]),pri[p],p);
wgt[p] = pwgts[p] + vwgt[v];
} else {
pri[p] = -maxpwgt; /* below what is possible for a valid priority */
vtx[p] = NULL_VTX;
wgt[p] = NULL_WGT;
}
}
/* figure out which side we'll use -- this seems like I could do it
* better */
if (vtx[MTMETIS_VSEP_PARTA] == NULL_VTX && \
vtx[MTMETIS_VSEP_PARTB] == NULL_VTX) {
/* exit loop -- both queues are empty */
return NULL_PID;
} else if (pri[MTMETIS_VSEP_PARTA] > pri[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTA;
} else if (pri[MTMETIS_VSEP_PARTA] < pri[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTB;
} else {
if (wgt[MTMETIS_VSEP_PARTA] < wgt[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTA;
} else if (wgt[MTMETIS_VSEP_PARTA] > wgt[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTB;
} else {
/* alternate sides */
side = (q[MTMETIS_VSEP_PARTA]->size + q[MTMETIS_VSEP_PARTB]->size) % 2;
}
}
/* make sure it will be balanced */
if (wgt[side] > maxpwgt) {
side = side ^ 0x01;
if (vtx[side] == NULL_VTX) {
/* the other side is empty */
return NULL_PID;
}
}
DL_ASSERT(q[side]->size > 0,"Choosing side with empty queue");
return side;
}
/**
 * @brief Choose which partition to move the next separator vertex to, based
 * on the tops of the two per-side priority queues (global variant: queue
 * entries are global vertex ids that are decoded to an owning thread and a
 * local id before looking up weight and connectivity).  Same selection
 * policy as S_pick_side_local: highest gain first, ties broken by the
 * lighter resulting side, then alternation; flips sides if the chosen move
 * would exceed maxpwgt.  Returns NULL_PID when no valid move exists.
 */
static inline pid_type S_pick_side(
graph_type const * const graph,
wgt_type const * const pwgts,
wgt_type const maxpwgt,
vsnbrinfo_type const * const * const gnbrinfo,
vw_pq_t * const * const q)
{
vtx_type p, v, g;
tid_type myid;
pid_type side;
/* part next move properties */
vtx_type vtx[2];
wgt_type wgt[2], pri[2];
wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
/* determine stats for each side */
for (p=0;p<MTMETIS_VSEP_SEP;++p) {
if (q[p]->size > 0) {
/* decode the global id at the top of queue p */
g = vtx[p] = vw_pq_peek(q[p]);
v = gvtx_to_lvtx(g,graph->dist);
myid = gvtx_to_tid(g,graph->dist);
/* gain: vertex weight minus connectivity to the opposite side */
pri[p] = gvwgt[myid][v] - gnbrinfo[myid][v].con[p^0x01];
DL_ASSERT(pri[p] == vw_pq_top(q[p]),"Vertex %"PF_VTX_T" has wrong " \
"priority (%"PF_WGT_T" vs %"PF_WGT_T") in queue %"PF_PID_T"\n", \
v,vw_pq_top(q[p]),pri[p],p);
wgt[p] = pwgts[p] + gvwgt[myid][v];
} else {
pri[p] = -maxpwgt; /* below what is possible for a valid priority */
vtx[p] = NULL_VTX;
wgt[p] = NULL_WGT;
}
}
/* figure out which side we'll use -- this seems like I could do it
* better */
if (vtx[MTMETIS_VSEP_PARTA] == NULL_VTX && \
vtx[MTMETIS_VSEP_PARTB] == NULL_VTX) {
/* exit loop -- both queues are empty */
return NULL_PID;
} else if (pri[MTMETIS_VSEP_PARTA] > pri[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTA;
} else if (pri[MTMETIS_VSEP_PARTA] < pri[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTB;
} else {
if (wgt[MTMETIS_VSEP_PARTA] < wgt[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTA;
} else if (wgt[MTMETIS_VSEP_PARTA] > wgt[MTMETIS_VSEP_PARTB]) {
side = MTMETIS_VSEP_PARTB;
} else {
/* alternate sides */
side = (q[MTMETIS_VSEP_PARTA]->size + q[MTMETIS_VSEP_PARTB]->size) % 2;
}
}
/* make sure it will be balanced */
if (wgt[side] > maxpwgt) {
side = side ^ 0x01;
if (vtx[side] == NULL_VTX) {
/* the other side is empty */
return NULL_PID;
}
}
DL_ASSERT(q[side]->size > 0,"Choosing side with empty queue");
return side;
}
/**
 * @brief Recompute connectivity info for separator vertices on the thread
 * interface (vertices with at least one neighbor owned by another thread),
 * whose cached nbrinfo may be stale after remote moves.  When no explicit
 * interface list is given -- or the list is much larger than the boundary
 * (the '*7' factor is a heuristic crossover; presumably tuned empirically,
 * TODO confirm) -- scan the boundary set instead and refresh any boundary
 * vertex that has a remote neighbor.
 */
static void S_fix_iface(
ctrl_type * const ctrl,
graph_type const * const graph,
vsinfo_type * const vsinfo,
vtx_type const * const iface,
vtx_type const niface)
{
vtx_type i, k, v;
adj_type j;
tid_type const myid = dlthread_get_id(ctrl->comm);
vtx_type const mynvtxs = graph->mynvtxs[myid];
adj_type const * const xadj = graph->xadj[myid];
vtx_type const * const adjncy = graph->adjncy[myid];
wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
vsnbrinfo_type * const nbrinfo = vsinfo->nbrinfo;
vtx_iset_t * const bnd = vsinfo->bnd;
if (iface == NULL || niface > bnd->size*7) {
/* scan the boundary: refresh any vertex with a remote neighbor */
for (i=0;i<bnd->size;++i) {
v = vtx_iset_get(i,bnd);
for (j=xadj[v];j<xadj[v+1];++j) {
k = adjncy[j];
/* k >= mynvtxs means the neighbor lives on another thread */
if (k >= mynvtxs) {
S_calc_conn(v,myid,mynvtxs,xadj,adjncy,gvwgt, \
(pid_type const **)graph->where,graph->dist,nbrinfo[v].con);
break;
}
}
}
} else {
/* refresh only the listed interface vertices still in the separator */
for (i=0;i<niface;++i) {
v = iface[i];
if (graph->where[myid][v] == MTMETIS_VSEP_SEP) {
S_calc_conn(v,myid,mynvtxs,xadj,adjncy,gvwgt, \
(pid_type const **)graph->where,graph->dist,nbrinfo[v].con);
}
}
}
}
/**
 * @brief Run serial METIS node-separator refinement on each thread's local
 * subgraph: build a CSR containing only intra-thread edges (cross-thread
 * edges are dropped), call __METIS_NodeRefine with the per-vertex lock
 * array, then recompute and reduce the global partition weights.
 * Collective: all threads in ctrl->comm must call this (sumareduce/barrier).
 *
 * NOTE(review): both malloc results are used unchecked (CERT MEM32-C).  An
 * early return on failure would deadlock the collective reduce/barrier
 * below, so any fix needs a project-wide OOM policy -- flagging only.
 * NOTE(review): buffers are allocated with malloc but released with dl_free;
 * presumably dl_free wraps free -- confirm against domlib.
 * NOTE(review): the (idx_t*) casts assume wgt_type/pid_type have the same
 * width as METIS's idx_t -- TODO confirm build configuration guarantees it.
 */
static void S_metis_refine(
ctrl_type const * const ctrl,
graph_type * const graph,
int const * const locked)
{
vtx_type i, k;
adj_type j;
idx_t lnedges;
wgt_type lpwgts[3];
idx_t * xadj;
idx_t * adjncy;
tid_type const myid = dlthread_get_id(ctrl->comm);
vtx_type const mynvtxs = graph->mynvtxs[myid];
xadj = malloc(sizeof(idx_t)*(mynvtxs+1));
adjncy = malloc(sizeof(idx_t)*(graph->xadj[myid][mynvtxs]));
lnedges = 0;
xadj[0] = lnedges;
/* compact the local adjacency: keep only edges to locally-owned vertices */
for (i=0;i<mynvtxs;++i) {
for (j=graph->xadj[myid][i];j<graph->xadj[myid][i+1];++j) {
k = graph->adjncy[myid][j];
if (k < mynvtxs) {
adjncy[lnedges] = k;
++lnedges;
}
}
xadj[i+1] = lnedges;
}
/* refine the 3-way node separator in place; 1.03 is the imbalance bound */
__METIS_NodeRefine(graph->mynvtxs[myid],xadj,(idx_t*)graph->vwgt[myid], \
adjncy,(idx_t*)graph->where[myid],(idx_t*)locked,1.03);
/* recompute local partition weights from the refined assignment */
wgt_set(lpwgts,0,3);
for (i=0;i<mynvtxs;++i) {
lpwgts[graph->where[myid][i]] += graph->vwgt[myid][i];
}
wgt_dlthread_sumareduce(lpwgts,3,ctrl->comm);
if (myid == 0) {
/* publish the reduced totals; index 2 is the separator weight */
wgt_copy(graph->pwgts,lpwgts,3);
graph->minsep = graph->pwgts[2];
}
dlthread_barrier(ctrl->comm);
dl_free(adjncy);
dl_free(xadj);
}
/******************************************************************************
* PARALLEL REFINEMENT PASSES **************************************************
******************************************************************************/
/**
 * @brief One parallel greedy refinement pass moving separator vertices to
 * 'side'.  Each thread seeds a priority queue with its boundary (separator)
 * vertices keyed by gain (vertex weight minus connectivity to the opposite
 * side), pops moves while gain remains positive (or zero when it improves
 * balance) and the weight bound maxpwgt allows, applies local neighbor
 * updates immediately, and ships updates for remotely-owned neighbors
 * through the communication buffer.  After the implicit barrier in
 * update_combuffer_send, each thread applies the updates sent to it.
 * Collective: all threads in ctrl->comm must call this.
 *
 * @return The number of vertices this thread moved out of the separator.
 */
static vtx_type S_flow_GREEDY(
ctrl_type * const ctrl,
graph_type * const graph,
vsinfo_type * const vsinfo,
vsnbrinfo_type * const * const gnbrinfo,
update_combuffer_t * const combuffer,
vw_pq_t * const q,
pid_type const side,
wgt_type * const lpwgts,
wgt_type const maxpwgt)
{
vtx_type i, k, v, nmoves, lvtx;
adj_type j;
wgt_type newwgt, gain;
pid_type other;
tid_type nbrid, o, t;
update_type up;
vsnbrinfo_type * myrinfo;
update_buffer_t * updates;
tid_type const myid = dlthread_get_id(ctrl->comm);
tid_type const nthreads = dlthread_get_nthreads(ctrl->comm);
vtx_type const mynvtxs = graph->mynvtxs[myid];
adj_type const * const xadj = graph->xadj[myid];
vtx_type const * const adjncy = graph->adjncy[myid];
wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
pid_type * const * const gwhere = graph->where;
vsnbrinfo_type * const nbrinfo = vsinfo->nbrinfo;
vtx_iset_t * const bnd = vsinfo->bnd;
/* the side I'm not moving to */
other = side ^ 0x01;
nmoves = 0;
/* add my boundary vertices to the queue */
vw_pq_clear(q);
for (i=0;i<bnd->size;++i) {
v = bnd->ind[i];
DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
myrinfo = nbrinfo + v;
gain = gvwgt[myid][v] - myrinfo->con[other];
vw_pq_push(gain,v,q);
}
/* make possible moves */
while (q->size > 0) {
v = vw_pq_pop(q);
DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
DL_ASSERT_EQUALS(vtx_iset_contains(v,bnd),1,"%d");
/* re-read the gain: it may have decayed since the vertex was queued */
myrinfo = nbrinfo + v;
gain = gvwgt[myid][v] - myrinfo->con[other];
if (gain < 0 || (gain == 0 && lpwgts[side] >= lpwgts[other])) {
/* only move vertices with positive gain */
break;
}
#if 0
if (lpwgts[side] > lpwgts[other] && \
gain < gvwgt[myid][v] - myrinfo->con[side]) {
/* if its better to move this vertex to the other side,
* don't move it here */
continue;
}
#endif
newwgt = lpwgts[side] + gvwgt[myid][v];
if (newwgt > maxpwgt) {
/* this vertex will put us over the limit */
continue;
}
/* Once we have selected a vertex to move, we need to update several
* things:
* -the partition and separator weights
* -pull the neighboring vertices in 'other' into the separator
* -update the priorities of the affected vertives
*/
/* adjust partition weights */
lpwgts[side] += gvwgt[myid][v];
lpwgts[MTMETIS_VSEP_SEP] -= gvwgt[myid][v];
++nmoves;
/* at this point, we have decided to make the move */
gwhere[myid][v] = side;
/* remove the vertex from the boundary */
vtx_iset_remove(v,bnd);
/* process edges */
for (j=xadj[v];j<xadj[v+1];++j) {
k = adjncy[j];
/* ids >= mynvtxs encode vertices owned by other threads */
if (k < mynvtxs) {
lvtx = k;
nbrid = myid;
} else {
lvtx = gvtx_to_lvtx(k,graph->dist);
nbrid = gvtx_to_tid(k,graph->dist);
}
/* update priorities of neighboring */
if (gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
#pragma omp atomic
gnbrinfo[nbrid][lvtx].con[side] += gvwgt[myid][v];
}
if (nbrid == myid) {
/* local neighbor: apply the pull-into-separator update now,
 * passing q so newly movable vertices get (re)queued */
S_update_neighbor(side,lvtx,myid,q,bnd,gnbrinfo,lpwgts,graph);
} else {
/* let the neighboring thread know about the move */
up.v = lvtx;
update_combuffer_add(nbrid,up,combuffer);
}
}
}
/* implicit barrier */
update_combuffer_send(combuffer);
/* recieve updates from other threads */
for (o=1;o<nthreads;++o) {
t = (myid + o) % nthreads;
updates = update_combuffer_get(t,combuffer);
for (i=0;i<(vtx_type)updates->size;++i) {
up = updates->elements[i];
v = up.v;
/* q == NULL: remote-triggered pulls are not re-queued this pass */
S_update_neighbor(side,v,myid,NULL,bnd,gnbrinfo,lpwgts,graph);
}
}
update_combuffer_clear(combuffer);
return nmoves;
}
/**
 * @brief Interface-restricted variant of S_flow_GREEDY: one parallel greedy
 * pass moving separator vertices to 'side', but the priority queue is seeded
 * only from the supplied interface vertex list (iface/niface) rather than
 * the whole boundary, and locally-pulled neighbors are not re-queued
 * (S_update_neighbor is called with q == NULL even for local neighbors).
 * Remote neighbor updates are exchanged through the communication buffer
 * after the implicit barrier in update_combuffer_send.  Collective: all
 * threads in ctrl->comm must call this.
 *
 * @return The number of vertices this thread moved out of the separator.
 */
static vtx_type S_flow_GREEDYI(
ctrl_type * const ctrl,
graph_type * const graph,
vsinfo_type * const vsinfo,
vsnbrinfo_type * const * const gnbrinfo,
update_combuffer_t * const combuffer,
vw_pq_t * const q,
pid_type const side,
wgt_type * const lpwgts,
wgt_type const maxpwgt,
vtx_type const * const iface,
vtx_type const niface)
{
vtx_type i, k, v, nmoves, lvtx;
adj_type j;
wgt_type newwgt, gain;
pid_type other;
tid_type nbrid, o, t;
update_type up;
vsnbrinfo_type * myrinfo;
update_buffer_t * updates;
tid_type const myid = dlthread_get_id(ctrl->comm);
tid_type const nthreads = dlthread_get_nthreads(ctrl->comm);
vtx_type const mynvtxs = graph->mynvtxs[myid];
adj_type const * const xadj = graph->xadj[myid];
vtx_type const * const adjncy = graph->adjncy[myid];
wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
pid_type * const * const gwhere = graph->where;
vsnbrinfo_type * const nbrinfo = vsinfo->nbrinfo;
vtx_iset_t * const bnd = vsinfo->bnd;
/* the side I'm not moving to */
other = side ^ 0x01;
nmoves = 0;
/* add my boundary vertices to the queue */
vw_pq_clear(q);
for (i=0;i<niface;++i) {
v = iface[i];
/* only interface vertices currently in the separator are candidates */
if (gwhere[myid][v] == MTMETIS_VSEP_SEP) {
myrinfo = nbrinfo + v;
gain = gvwgt[myid][v] - myrinfo->con[other];
vw_pq_push(gain,v,q);
}
}
/* make possible moves */
while (q->size > 0) {
v = vw_pq_pop(q);
DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
DL_ASSERT_EQUALS(vtx_iset_contains(v,bnd),1,"%d");
/* re-read the gain: it may have decayed since the vertex was queued */
myrinfo = nbrinfo + v;
gain = gvwgt[myid][v] - myrinfo->con[other];
if (gain < 0 || (gain == 0 && lpwgts[side] >= lpwgts[other])) {
/* only move vertices with positive gain */
break;
}
#if 0
if (lpwgts[side] > lpwgts[other] && \
gain < gvwgt[myid][v] - myrinfo->con[side]) {
/* if its better to move this vertex to the other side,
* don't move it here */
continue;
}
#endif
newwgt = lpwgts[side] + gvwgt[myid][v];
if (newwgt > maxpwgt) {
/* this vertex will put us over the limit */
continue;
}
/* Once we have selected a vertex to move, we need to update several
* things:
* -the partition and separator weights
* -pull the neighboring vertices in 'other' into the separator
* -update the priorities of the affected vertives
*/
/* adjust partition weights */
lpwgts[side] += gvwgt[myid][v];
lpwgts[MTMETIS_VSEP_SEP] -= gvwgt[myid][v];
++nmoves;
/* at this point, we have decided to make the move */
gwhere[myid][v] = side;
/* remove the vertex from the boundary */
vtx_iset_remove(v,bnd);
/* process edges */
for (j=xadj[v];j<xadj[v+1];++j) {
k = adjncy[j];
/* ids >= mynvtxs encode vertices owned by other threads */
if (k < mynvtxs) {
lvtx = k;
nbrid = myid;
} else {
lvtx = gvtx_to_lvtx(k,graph->dist);
nbrid = gvtx_to_tid(k,graph->dist);
}
/* update priorities of neighboring */
if (gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
#pragma omp atomic
gnbrinfo[nbrid][lvtx].con[side] += gvwgt[myid][v];
}
if (nbrid == myid) {
/* q == NULL: pulled neighbors are not re-queued in this variant */
S_update_neighbor(side,lvtx,myid,NULL,bnd,gnbrinfo,lpwgts,graph);
} else {
/* let the neighboring thread know about the move */
up.v = lvtx;
update_combuffer_add(nbrid,up,combuffer);
}
}
}
/* implicit barrier */
update_combuffer_send(combuffer);
/* recieve updates from other threads */
for (o=1;o<nthreads;++o) {
t = (myid + o) % nthreads;
updates = update_combuffer_get(t,combuffer);
for (i=0;i<(vtx_type)updates->size;++i) {
up = updates->elements[i];
v = up.v;
S_update_neighbor(side,v,myid,NULL,bnd,gnbrinfo,lpwgts,graph);
}
}
update_combuffer_clear(combuffer);
return nmoves;
}
/**
 * Perform one thread-local FM refinement flow moving separator vertices into
 * partition 'side', with hill-climbing: moves that worsen the separator are
 * made speculatively and rolled back (in exact reverse order) if no better
 * state is found within the search limit. Remote neighbors (k >= mynvtxs)
 * are skipped in the edge loops, so all updates here are purely local to
 * this thread's portion of the graph.
 *
 * @param ctrl    Control structure (communicator, hillsize).
 * @param graph   The distributed graph being refined.
 * @param vsinfo  This thread's vertex separator information.
 * @param moves   Scratch stack of made moves (1-indexed).
 * @param pullmk  Per-move offsets into 'pulled' (pullmk[m+1] = #pulled
 *                after move m).
 * @param pulled  Scratch list of vertices pulled into the separator.
 * @param q       Priority queue for candidate moves (cleared here).
 * @param locked  Per-vertex lock state consulted via S_valid_move().
 * @param side    The partition vertices are moved into (0 or 1).
 * @param lpwgts  Thread-local partition weights, updated in place.
 * @param maxpwgt Maximum allowed weight for partition 'side'.
 *
 * @return The number of moves kept after rollback.
 */
static vtx_type S_flow_SFM(
    ctrl_type * const ctrl,
    graph_type * const graph,
    vsinfo_type * const vsinfo,
    vtx_type * const moves,
    vtx_type * const pullmk,
    vtx_type * const pulled,
    vw_pq_t * const q,
    int * const locked,
    pid_type const side,
    wgt_type * const lpwgts,
    wgt_type const maxpwgt)
{
  vtx_type i, k, v, m, nmoves, minmove;
  adj_type j, npulled, l;
  wgt_type minsep, newbal, minbal, gain, cursep;
  pid_type p, other;
  vsnbrinfo_type * myrinfo;

  tid_type const myid = dlthread_get_id(ctrl->comm);
  tid_type const nthreads = dlthread_get_nthreads(ctrl->comm);
  vtx_type const mynvtxs = graph->mynvtxs[myid];
  adj_type const * const xadj = graph->xadj[myid];
  vtx_type const * const adjncy = graph->adjncy[myid];
  wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
  wgt_type const * const vwgt = graph->vwgt[myid];
  pid_type * const * const gwhere = graph->where;
  pid_type * const where = gwhere[myid];
  vsnbrinfo_type * const nbrinfo = vsinfo->nbrinfo;
  vtx_iset_t * const bnd = vsinfo->bnd;

  /* shrink the hill-climbing window as the thread count grows */
  vtx_type const limit = ctrl->hillsize/sqrt(nthreads);

  /* the side I'm not moving to */
  other = side ^ 0x01;

  nmoves = 0;
  minmove = 0;
  npulled = 0;
  pullmk[1] = 0;
  minbal = wgt_abs_diff(lpwgts[0],lpwgts[1]);

  /* add my movable boundary vertices to the queue */
  vw_pq_clear(q);
  for (i=0;i<bnd->size;++i) {
    v = bnd->ind[i];
    if (S_valid_move(v,side,locked)) {
      DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,where[v],"%"PF_PID_T);
      myrinfo = nbrinfo + v;
      gain = vwgt[v] - myrinfo->con[other];
      vw_pq_push(gain,v,q);
    }
  }

  cursep = minsep = graph->minsep;

  /* make possible moves */
  while (nmoves < mynvtxs && q->size > 0) {
    v = vw_pq_pop(q);
    DL_ASSERT(S_valid_move(v,side,locked),"Pulled a vertex " \
        "from %"PF_PID_T" but is locked to %"PF_PID_T,side,locked[v]);
    DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,where[v],"%"PF_PID_T);
    DL_ASSERT_EQUALS(vtx_iset_contains(v,bnd),1,"%d");
    if (lpwgts[side] >= maxpwgt) {
      break;
    }
    if (lpwgts[side] + vwgt[v] > maxpwgt) {
      /* this vertex is too heavy -- skip it but keep trying lighter ones */
      continue;
    }
    /* make sure we have space to record the vertices added to the
     * separator */
    if (npulled + xadj[v+1] - xadj[v] >= 2*mynvtxs-1) {
      /* roll back to our best state */
      break;
    }
    /* update our minimum objective value or check to make sure we
     * haven't passed the search limit */
    cursep = cursep - (vwgt[v]-nbrinfo[v].con[other]);
    newbal = wgt_abs_diff(lpwgts[side]+vwgt[v], \
        lpwgts[other]-nbrinfo[v].con[other]);
    if (cursep < minsep || \
        (cursep == minsep && newbal < minbal)) {
      minsep = cursep;
      minmove = nmoves+1;
      /* we only need to abs this here, as if its negative, it means the
       * move increases the balance */
      minbal = newbal;
    } else {
      if (nmoves-minmove+1 > limit) {
        /* revert back to best cut */
        break;
      }
    }
    /* Once we have selected a vertex to move, we need to update several
     * things:
     *   -the partition and separator weights
     *   -pull the neighboring vertices in 'other' into the separator
     *   -update the priorities of the affected vertices
     */
    /* at this point, we have decided to make the move */
    myrinfo = nbrinfo + v;
    where[v] = side;
    moves[++nmoves] = v; /* count one up -- 'moves' is 1-indexed */
    /* remove the vertex from the boundary -- and opposing pq */
    vtx_iset_remove(v,bnd);
    /* adjust partition weights */
    lpwgts[side] += vwgt[v];
    lpwgts[MTMETIS_VSEP_SEP] -= vwgt[v];
    /* process edges -- remote neighbors (k >= mynvtxs) are ignored */
    for (j=xadj[v];j<xadj[v+1];++j) {
      k = adjncy[j];
      if (k < mynvtxs) {
        if (where[k] == MTMETIS_VSEP_SEP) {
          /* update priorities of neighboring vertices */
          nbrinfo[k].con[side] += vwgt[v];
        } else if (where[k] == other) {
          /* pull this vertex into the separator */
          DL_ASSERT_EQUALS(vtx_iset_contains(k,bnd),0,"%d");
          /* record vertex being pulled into the separator */
          pulled[npulled++] = k;
          /* actually move the vertex */
          vtx_iset_add(k,bnd);
          where[k] = MTMETIS_VSEP_SEP;
          /* calculate the connectivity */
          S_calc_conn(k,myid,mynvtxs,xadj,adjncy,gvwgt, \
              (pid_type const **)gwhere,graph->dist,nbrinfo[k].con);
          /* update the partition weights */
          lpwgts[other] -= vwgt[k];
          lpwgts[MTMETIS_VSEP_SEP] += vwgt[k];
          /* add the vertex to the priority queue for further movement */
          if (S_valid_move(k,side,locked)) {
            gain = vwgt[k] - nbrinfo[k].con[other];
            vw_pq_push(gain,k,q);
          }
          /* update neighbors of the vertices pulled into the separator */
          for (l=xadj[k];l<xadj[k+1];++l) {
            m = adjncy[l];
            if (m < mynvtxs) {
              if (where[m] == MTMETIS_VSEP_SEP) {
                /* this vertex is in the separator, and is now more likely
                 * to move. */
                /* update connectivity */
                nbrinfo[m].con[other] -= vwgt[k];
                if (S_valid_move(m,side,locked)) {
                  /* update the value for moving this vertex in the same
                   * direction */
                  if (vw_pq_contains(m,q)) {
                    gain = vwgt[m] - nbrinfo[m].con[other];
                    vw_pq_update(gain,m,q);
                  }
                }
              }
            }
          }
        }
      }
    }
    /* mark the number of vertices I pulled into the boundary */
    pullmk[nmoves+1] = npulled;
  }

  par_dprintf("SFM1S: Pass %zu finished, sep = %"PF_WGT_T", rolling back %" \
      PF_VTX_T"/%"PF_VTX_T" moves: %"PF_WGT_T":%"PF_WGT_T"\n",(size_t)0, \
      lpwgts[MTMETIS_VSEP_SEP],nmoves-minmove,nmoves, \
      lpwgts[MTMETIS_VSEP_PARTA],lpwgts[MTMETIS_VSEP_PARTB]);

  /* rollback until we are back at the maximum state -- moves must be
   * undone in reverse of the order in which they were made */
  while (nmoves > minmove) {
    v = moves[nmoves];
    DL_ASSERT(side != MTMETIS_VSEP_SEP,"ATtempting to unmove vertex %" \
        PF_VTX_T" in separator\n",v);
    /* unmove this vertex */
    lpwgts[MTMETIS_VSEP_SEP] += vwgt[v];
    lpwgts[side] -= vwgt[v];
    where[v] = MTMETIS_VSEP_SEP;
    vtx_iset_add(v,bnd);
    /* adjust priorities of neighboring vertices and re-calculate
     * connectivity */
    wgt_set(nbrinfo[v].con,0,2);
    for (j=xadj[v];j<xadj[v+1];++j) {
      k = adjncy[j];
      if (k < mynvtxs) {
        p = where[k];
        if (p == MTMETIS_VSEP_SEP) {
          nbrinfo[k].con[side] -= vwgt[v];
        }
      }
    }
    S_calc_conn(v,myid,mynvtxs,xadj,adjncy,gvwgt,(pid_type const **)gwhere, \
        graph->dist,nbrinfo[v].con);
    /* push nodes back out of the separator */
    for (i=pullmk[nmoves];i<pullmk[nmoves+1];++i) {
      k = pulled[i];
      DL_ASSERT_EQUALS(where[k],MTMETIS_VSEP_SEP,"%"PF_PID_T);
      /* move the vertex */
      where[k] = other;
      /* adjust partition weights */
      lpwgts[other] += vwgt[k];
      lpwgts[MTMETIS_VSEP_SEP] -= vwgt[k];
      /* remove the vertex from the boundary */
      vtx_iset_remove(k,bnd);
      /* update neighbor-neighbor connectivity */
      for (l=xadj[k];l<xadj[k+1];++l) {
        m = adjncy[l];
        if (m < mynvtxs) {
          if (where[m] == MTMETIS_VSEP_SEP) {
            nbrinfo[m].con[other] += vwgt[k];
          }
        }
      }
      S_calc_conn(k,myid,mynvtxs,xadj,adjncy,gvwgt,(pid_type const **)gwhere, \
          graph->dist,nbrinfo[k].con);
    }
    /* go to the next move */
    --nmoves;
  }

  return nmoves;
}
/**
 * Perform a balancing pass: each thread greedily moves separator vertices
 * into the globally lighter partition until its share of the required
 * weight transfer ('mvwgt') is used up or no acceptable move remains.
 * Cross-thread effects are exchanged through a communication buffer, and
 * thread 0 publishes the re-reduced global partition weights and separator
 * size at the end of the pass.
 *
 * @param ctrl    Control structure (communicator and RNG seed; seed is
 *                advanced by thread 0).
 * @param graph   The distributed graph (pwgts and minsep updated by
 *                thread 0).
 * @param vsinfo  This thread's vertex separator information.
 * @param maxpwgt Maximum allowed partition weight.
 */
static void S_pass_BAL(
    ctrl_type * const ctrl,
    graph_type * const graph,
    vsinfo_type * const vsinfo,
    wgt_type const maxpwgt)
{
  unsigned int seed;
  vtx_type i, k, v, nmoves, lvtx;
  adj_type j;
  wgt_type gain;
  tid_type o, nbrid, t;
  pid_type side, other;
  update_type up;
  wgt_type mvwgt;
  vtx_type * perm;
  vsnbrinfo_type * myrinfo;
  update_buffer_t * updates;
  wgt_type lpwgts[3];
  update_combuffer_t * combuffer;
  vsnbrinfo_type ** gnbrinfo;
  vw_pq_t * q;

  tid_type const myid = dlthread_get_id(ctrl->comm);
  tid_type const nthreads = dlthread_get_nthreads(ctrl->comm);
  vtx_type const nvtxs = graph->nvtxs;
  vtx_type const mynvtxs = graph->mynvtxs[myid];
  adj_type const * const xadj = graph->xadj[myid];
  vtx_type const * const adjncy = graph->adjncy[myid];
  wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
  wgt_type * const pwgts = graph->pwgts;
  pid_type * const * const gwhere = graph->where;
  vsnbrinfo_type * const nbrinfo = vsinfo->nbrinfo;
  vtx_iset_t * const bnd = vsinfo->bnd;

  /* share my neighbor-info array with the other threads */
  gnbrinfo = dlthread_get_shmem(sizeof(vsnbrinfo_type*)*nthreads,ctrl->comm);
  gnbrinfo[myid] = vsinfo->nbrinfo;

  combuffer = update_combuffer_create(ctrl->comm);

  q = vw_pq_create(0,mynvtxs);

  seed = ctrl->seed + myid;

  /* The overall algorithm for a balance pass works by prioritizing vertices
   * and moving them from the heavy side to the light until balance is
   * achieved.
   */
  perm = vtx_alloc(mynvtxs);

  /* work on a local copy of the partition weights */
  wgt_copy(lpwgts,pwgts,3);

  /* initial tracking variables -- 'side' is the lighter partition that
   * vertices will be moved into */
  if (lpwgts[0] > lpwgts[1]) {
    side = 1;
  } else {
    side = 0;
  }
  other = side ^ 0x01;

  /* how much weight I should move */
  mvwgt = (pwgts[other] - maxpwgt) / nthreads;

  /* queue my boundary vertices in pseudo-random order */
  vw_pq_clear(q);
  vtx_copy(perm,bnd->ind,bnd->size);
  vtx_pseudo_shuffle_r(perm,bnd->size/8,bnd->size,&seed);
  for (i=0;i<bnd->size;++i) {
    v = perm[i];
    DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
    myrinfo = nbrinfo + v;
    gain = gvwgt[myid][v] - myrinfo->con[other];
    vw_pq_push(gain,v,q);
  }

  nmoves = 0;

  /* make sure we got a good copy of the pwgts */
  dlthread_barrier(ctrl->comm);

  /* make possible moves */
  while (nmoves < nvtxs && q->size > 0) {
    v = vw_pq_pop(q);
    DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
    DL_ASSERT_EQUALS(vtx_iset_contains(v,bnd),1,"%d");
    /* we made the required moves */
    if (mvwgt < 0) {
      break;
    }
    gain = gvwgt[myid][v] - nbrinfo[v].con[other];
    if (gain < 0 && mvwgt > nbrinfo[v].con[other]/2) {
      /* skip negative-gain moves while there is still plenty of weight to
       * transfer via other vertices */
      continue;
    }
    if (gvwgt[myid][v] + lpwgts[side] >= maxpwgt) {
      /* moving this vertex will be bad, see other options */
      break;
    }
    /* Once we have selected a vertex to move, we need to update several
     * things:
     *   -the partition and separator weights
     *   -pull the neighboring vertices in 'other' into the separator
     *   -update the priorities of the affected vertices
     */
    ++nmoves;
    /* NOTE(review): 'mvwgt' is decreased inside the edge loop below for
     * every neighbor of v, rather than by v's own weight here -- the
     * per-vertex accounting is preserved commented out; confirm which is
     * intended */
    //mvwgt -= gvwgt[myid][v];
    /* we update location of a vertex leaving the separator, before updating
     * its neighbors */
    gwhere[myid][v] = side;
    /* at this point, we have decided to make the move */
    myrinfo = nbrinfo + v;
    /* remove the vertex from the boundary -- and opposing pq */
    vtx_iset_remove(v,bnd);
    /* adjust partition weights */
    lpwgts[side] += gvwgt[myid][v];
    lpwgts[MTMETIS_VSEP_SEP] -= gvwgt[myid][v];
    /* process edges */
    for (j=xadj[v];j<xadj[v+1];++j) {
      k = adjncy[j];
      if (k < mynvtxs) {
        lvtx = k;
        nbrid = myid;
      } else {
        /* k is a global vertex number -- translate to owner and local id */
        lvtx = gvtx_to_lvtx(k,graph->dist);
        nbrid = gvtx_to_tid(k,graph->dist);
      }
      if (gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
        /* update priorities of neighboring separator vertices */
        gnbrinfo[nbrid][lvtx].con[side] += gvwgt[myid][v];
      }
      if (nbrid == myid) {
        S_update_neighbor(side,lvtx,myid,q,bnd,gnbrinfo,lpwgts,graph);
      } else {
        /* let the neighboring thread know about the move */
        up.v = lvtx;
        update_combuffer_add(nbrid,up,combuffer);
      }
      /* we're decreasing other by this amount */
      mvwgt -= gvwgt[nbrid][lvtx];
    }
  }

  /* implicit barrier */
  update_combuffer_send(combuffer);

  /* receive updates from other threads */
  for (o=1;o<nthreads;++o) {
    t = (myid + o) % nthreads;
    updates = update_combuffer_get(t,combuffer);
    for (i=0;i<(vtx_type)updates->size;++i) {
      up = updates->elements[i];
      v = up.v;
      S_update_neighbor(side,v,myid,NULL,bnd,gnbrinfo,lpwgts,graph);
    }
  }

  update_combuffer_clear(combuffer);

  /* convert the local weights into deltas and sum-reduce across threads */
  lpwgts[0] -= pwgts[0];
  lpwgts[1] -= pwgts[1];
  lpwgts[MTMETIS_VSEP_SEP] -= pwgts[MTMETIS_VSEP_SEP];
  wgt_dlthread_sumareduce(lpwgts,3,ctrl->comm);

  /* thread 0 publishes the new global weights, separator size, and seed */
  if (myid == 0) {
    pwgts[0] += lpwgts[0];
    pwgts[1] += lpwgts[1];
    graph->minsep = pwgts[MTMETIS_VSEP_SEP] += lpwgts[MTMETIS_VSEP_SEP];
    ctrl->seed = seed;
  }

  dlthread_free_shmem(gnbrinfo,ctrl->comm);
  update_combuffer_free(combuffer);
  vw_pq_free(q);
  dl_free(perm);

  S_fix_iface(ctrl,graph,vsinfo,NULL,0);

  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)gwhere), \
      "Bad vsinfo after balancing");
  DL_ASSERT(check_vsbnd(bnd,graph),"Bad boundary after balancing");
}
/**
 * Drive alternating one-sided serial-FM flows (S_flow_SFM) over the
 * separator, flowing toward the lighter partition first, until either
 * 2*nrefpass half-passes have run or two consecutive half-passes make no
 * moves. Partition weights are synchronized and the interface repaired
 * before returning.
 *
 * @param ctrl    Control structure (communicator, seed, nrefpass).
 * @param graph   The distributed graph being refined.
 * @param vsinfo  This thread's vertex separator information.
 * @param moves   Scratch move stack passed through to S_flow_SFM.
 * @param pullmk  Scratch pull-mark array passed through to S_flow_SFM.
 * @param pulled  Scratch pulled-vertex array passed through to S_flow_SFM.
 * @param q       Priority queue passed through to S_flow_SFM.
 * @param locked  Per-vertex lock state passed through to S_flow_SFM.
 * @param iface   Interface vertices for S_fix_iface.
 * @param niface  Number of interface vertices.
 * @param maxpwgt Maximum allowed partition weight.
 *
 * @return The total number of moves made (summed over all threads).
 */
static vtx_type S_pass_SFM1S(
    ctrl_type * const ctrl,
    graph_type * const graph,
    vsinfo_type * const vsinfo,
    vtx_type * const moves,
    vtx_type * const pullmk,
    vtx_type * const pulled,
    vw_pq_t * const q,
    int * const locked,
    vtx_type const * const iface,
    vtx_type const niface,
    wgt_type const maxpwgt)
{
  size_t pass, nstalled;
  vtx_type flowmoves, totalmoves;
  pid_type side, start;
  wgt_type lpwgts[3];

  tid_type const myid = dlthread_get_id(ctrl->comm);
  wgt_type * const pwgts = graph->pwgts;

  /* flow toward the lighter partition first; break ties by level parity */
  if (pwgts[0] == pwgts[1]) {
    start = graph->level % 2;
  } else {
    start = (pwgts[0] > pwgts[1]) ? 1 : 0;
  }

  /* work on a private copy of the partition weights */
  wgt_copy(lpwgts,pwgts,3);

  totalmoves = 0;
  nstalled = 0;
  for (pass=0;pass<ctrl->nrefpass*2;++pass) {
    side = (pass + start) % 2;
    flowmoves = S_flow_SFM(ctrl,graph,vsinfo,moves,pullmk,pulled,q,locked, \
        side,lpwgts,maxpwgt);
    if (flowmoves > 0) {
      totalmoves += flowmoves;
      nstalled = 0;
    } else if (++nstalled == 2) {
      /* two consecutive empty half-passes -- nothing left to gain */
      break;
    }
  }

  /* publish the local weight deltas back to the global partition weights */
  S_sync_pwgts(myid,pwgts,lpwgts,ctrl->comm);
  if (myid == 0) {
    ctrl->seed = ctrl->seed+1;
  }

  S_fix_iface(ctrl,graph,vsinfo,iface,niface);

  /* implicit barrier */
  totalmoves = vtx_dlthread_sumreduce(totalmoves,ctrl->comm);

  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)graph->where), \
      "Bad vsinfo after refinement");
  DL_ASSERT(check_vsbnd(vsinfo->bnd,graph),"Bad boundary after " \
      "refinement");
#ifdef USE_ASSERTS
  dlthread_barrier(ctrl->comm);
#endif

  return totalmoves;
}
/**
 * Drive alternating one-sided greedy refinement flows, flowing toward the
 * lighter partition first, until either nrefpass passes complete or two
 * consecutive half-passes make no moves (globally). When 'bnd' is set, the
 * interface-only flow (S_flow_GREEDYI) is used instead of the full greedy
 * flow (S_flow_GREEDY).
 *
 * @param ctrl      Control structure (communicator, seed, nrefpass).
 * @param graph     The distributed graph being refined.
 * @param vsinfo    This thread's vertex separator information.
 * @param gnbrinfo  Per-thread separator neighbor-info arrays (shared).
 * @param combuffer Communication buffer for cross-thread move updates.
 * @param q         Priority queue passed through to the flow functions.
 * @param iface     Interface vertices (used by the interface-only flow and
 *                  by S_fix_iface).
 * @param niface    Number of interface vertices.
 * @param maxpwgt   Maximum allowed partition weight.
 * @param bnd       Non-zero to restrict refinement to interface vertices.
 *
 * @return The total number of moves made (summed over all threads).
 */
static vtx_type S_pass_GREEDY(
    ctrl_type * const ctrl,
    graph_type * const graph,
    vsinfo_type * const vsinfo,
    vsnbrinfo_type * const * const gnbrinfo,
    update_combuffer_t * const combuffer,
    vw_pq_t * const q,
    vtx_type const * const iface,
    vtx_type const niface,
    wgt_type const maxpwgt,
    int const bnd)
{
  vtx_type totalmoves, halfmoves, nstalled;
  pid_type start, side, pass;
  wgt_type lpwgts[3];

  tid_type const myid = dlthread_get_id(ctrl->comm);
  wgt_type * const pwgts = graph->pwgts;

  /* flow toward the lighter partition first; break ties by level parity */
  if (pwgts[0] == pwgts[1]) {
    start = graph->level % 2;
  } else {
    start = (pwgts[0] > pwgts[1]) ? 1 : 0;
  }

  /* make local copies of the partition weights */
  wgt_copy(lpwgts,pwgts,MTMETIS_VSEP_NPARTS);

  totalmoves = 0;
  nstalled = 0;
  for (pass=0;pass<ctrl->nrefpass;++pass) {
    side = (pass + start) % 2;
    if (bnd) {
      /* interface only refinement */
      halfmoves = S_flow_GREEDYI(ctrl,graph,vsinfo,gnbrinfo,combuffer,q, \
          side,lpwgts,maxpwgt,iface,niface);
    } else {
      /* regular greedy refinement */
      halfmoves = S_flow_GREEDY(ctrl,graph,vsinfo,gnbrinfo,combuffer,q, \
          side,lpwgts,maxpwgt);
    }
    /* implicit barrier */
    halfmoves = vtx_dlthread_sumreduce(halfmoves,ctrl->comm);
    if (myid == 0) {
      ctrl->seed = ctrl->seed+1;
    }
    /* make sure we have good information at the end of each half-pass */
    S_fix_iface(ctrl,graph,vsinfo,iface,niface);
    if (halfmoves > 0) {
      totalmoves += halfmoves;
      nstalled = 0;
    } else if (++nstalled == 2) {
      /* two consecutive empty half-passes -- nothing left to gain */
      break;
    }
  }

  /* implicit barrier */
  S_sync_pwgts(myid,pwgts,lpwgts,ctrl->comm);

  return totalmoves;
}
/******************************************************************************
* PARALLEL REFINEMENT FUNCTIONS ***********************************************
******************************************************************************/
/**
 * Perform serialized FM refinement on the vertex separator: all threads
 * cooperate on setup (sharing their lock, neighbor-info, and boundary
 * arrays), but move selection and application are done by thread 0 alone,
 * using one global priority queue per partition side and hill-climbing with
 * rollback to the best observed separator. Runs up to 'niter' passes,
 * stopping early when a pass makes no moves.
 *
 * @param ctrl    Control structure (communicator, hillsize).
 * @param graph   The distributed graph being refined (pwgts and minsep are
 *                updated).
 * @param niter   Maximum number of refinement passes.
 * @param vsinfo  This thread's vertex separator information.
 * @param maxpwgt Maximum allowed partition weight.
 *
 * @return The total number of moves kept across all passes.
 */
static vtx_type S_vseprefine_FM(
    ctrl_type * const ctrl,
    graph_type * const graph,
    size_t const niter,
    vsinfo_type * const vsinfo,
    wgt_type const maxpwgt)
{
  vtx_type i, k, g, v, m, nmoves, lvtx, olvtx, minmove, totalmoves, ntotalmoves;
  adj_type j, npulled, l;
  wgt_type minsep, newbal, minbal, gain, cursep;
  pid_type side, other, p;
  tid_type nbrid, onbrid, myid;
  vtx_type * moves, * pulled, * pullmk;
  vsnbrinfo_type * myrinfo;
  size_t pass;
  vw_pq_t * q[2];
  int ** glocked;
  vsnbrinfo_type ** gnbrinfo;
  vtx_iset_t ** gbnd;

  tid_type const nthreads = graph->dist.nthreads;
  vtx_type const nvtxs = graph->nvtxs;
  vtx_type const * const gmynvtxs = graph->mynvtxs;
  adj_type const * const * const gxadj = (adj_type const **)graph->xadj;
  vtx_type const * const * const gadjncy = (vtx_type const **)graph->adjncy;
  wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
  wgt_type * const pwgts = graph->pwgts;
  pid_type * const * const gwhere = graph->where;
  vtx_type const limit = ctrl->hillsize;

  DL_ASSERT_EQUALS(nthreads,graph->dist.nthreads,"%"PF_TID_T);

  myid = dlthread_get_id(ctrl->comm);

  /* one shared allocation holds the per-thread lock, neighbor-info, and
   * boundary pointer arrays */
  glocked = dlthread_get_shmem((sizeof(int*)*nthreads) + \
      (sizeof(vsnbrinfo_type*)*nthreads) + \
      (sizeof(vtx_iset_t*)*nthreads),ctrl->comm);
  gnbrinfo = (vsnbrinfo_type**)(glocked+nthreads);
  gbnd = (vtx_iset_t**)(gnbrinfo+nthreads);

  gnbrinfo[myid] = vsinfo->nbrinfo;
  glocked[myid] = int_alloc(gmynvtxs[myid]);
  gbnd[myid] = vsinfo->bnd;

  dlthread_barrier(ctrl->comm);

  /* allocate stuff only needed by master thread */
  if (myid == 0) {
    moves = vtx_alloc(nvtxs+1); /* we start at 1 not 0 */
    pullmk = vtx_alloc(nvtxs+2);
    pulled = vtx_alloc(nvtxs*3);
    /* setup priority queues */
    q[MTMETIS_VSEP_PARTA] = vw_pq_create(0,graph->gnvtxs);
    q[MTMETIS_VSEP_PARTB] = vw_pq_create(0,graph->gnvtxs);
  } else {
    /* suppress compiler warnings */
    moves = NULL;
    pullmk = NULL;
    pulled = NULL;
    q[MTMETIS_VSEP_PARTA] = NULL;
    q[MTMETIS_VSEP_PARTB] = NULL;
  }

  ntotalmoves = 0;

  for (pass=0;pass<niter;++pass) {
    /* The overall algorithm for a refinement pass looks as follows:
     * -Greedily select balanced moves to make, ensuring balance is
     *  maintained.
     * -Track the maximum objective state, and return to it at the end of
     *  each pass.
     */

    /* reset locked state in parallel */
    myid = dlthread_get_id(ctrl->comm);
    int_set(glocked[myid],UNLOCKED,gmynvtxs[myid]);
    totalmoves = 0;

    dlthread_barrier(ctrl->comm);

    /* only the master thread selects and applies moves -- note that inside
     * this block 'myid' is re-purposed as a thread-index loop counter and
     * as the owner id of the vertex being moved; it is re-fetched at the
     * top of each pass */
    if (myid == 0) {
      /* initial tracking variables */
      nmoves = 0;
      npulled = 0;
      pullmk[1] = 0;
      minmove = 0;
      minbal = wgt_abs_diff(pwgts[0],pwgts[1]);
      cursep = minsep = graph->minsep;
      /* setup priority queues */
      for (p=0;p<MTMETIS_VSEP_SEP;++p) {
        vw_pq_clear(q[p]);
      }
      /* queue every thread's boundary vertices, keyed by global vertex id,
       * in both per-side queues */
      for (myid=0;myid<nthreads;++myid) {
        for (i=0;i<gbnd[myid]->size;++i) {
          v = vtx_iset_get(i,gbnd[myid]);
          DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
          myrinfo = gnbrinfo[myid] + v;
          g = lvtx_to_gvtx(v,myid,graph->dist);
          for (p=0;p<MTMETIS_VSEP_SEP;++p) {
            gain = gvwgt[myid][v] - myrinfo->con[p^0x01];
            vw_pq_push(gain,g,q[p]);
          }
        }
      }
      /* make possible moves */
      while (nmoves < nvtxs) {
        side = S_pick_side(graph,pwgts,maxpwgt, \
            (vsnbrinfo_type const **)gnbrinfo,q);
        if (side == NULL_PID) {
          /* we've emptied the priority queues */
          break;
        }
        /* the side I'm not moving to */
        other = side ^ 0x01;
        g = vw_pq_pop(q[side]);
        v = gvtx_to_lvtx(g,graph->dist);
        myid = gvtx_to_tid(g,graph->dist);
        DL_ASSERT(glocked[myid][v] != (int)other,"Pulled a vertex " \
            "from %"PF_PID_T" but is locked to %"PF_PID_T,side,other);
        DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
        DL_ASSERT_EQUALS(vtx_iset_contains(v,gbnd[myid]),1,"%d");
        /* make sure we have space to record the vertices added to the
         * separator */
        if (npulled + gxadj[myid][v+1] - gxadj[myid][v] >= 2*nvtxs-1) {
          /* roll back to our best state */
          break;
        }
        /* update our minimum objective value or check to make sure we
         * haven't passed the search limit */
        cursep = cursep - (gvwgt[myid][v]-gnbrinfo[myid][v].con[other]);
        newbal = wgt_abs_diff(pwgts[side]+gvwgt[myid][v], \
            pwgts[other]-gnbrinfo[myid][v].con[other]);
        if (cursep < minsep || \
            (cursep == minsep && newbal < minbal)) {
          minsep = cursep;
          minmove = nmoves+1;
          /* we only need to abs this here, as if its negative, it means the
           * move increases the balance */
          minbal = newbal;
        } else {
          if (nmoves-minmove+1 > 2*limit || \
              (nmoves-minmove+1 > limit && cursep > 1.1*minsep)) {
            /* revert back to best cut */
            break;
          }
        }
        /* Once we have selected a vertex to move, we need to update several
         * things:
         *   -the partition and separator weights
         *   -pull the neighboring vertices in 'other' into the separator
         *   -update the priorities of the affected vertices
         */
        /* at this point, we have decided to make the move */
        myrinfo = gnbrinfo[myid] + v;
        gwhere[myid][v] = side;
        moves[++nmoves] = g; /* count one up -- 'moves' is 1-indexed */
        S_lock(v,glocked[myid],side);
        /* remove the vertex from the boundary -- and opposing pq */
        vtx_iset_remove(v,gbnd[myid]);
        if (vw_pq_contains(g,q[other])) {
          vw_pq_remove(g,q[other]);
        }
        /* adjust partition weights */
        pwgts[side] += gvwgt[myid][v];
        pwgts[MTMETIS_VSEP_SEP] -= gvwgt[myid][v];
        /* process edges */
        for (j=gxadj[myid][v];j<gxadj[myid][v+1];++j) {
          k = gadjncy[myid][j];
          if (k < gmynvtxs[myid]) {
            lvtx = k;
            nbrid = myid;
            k = lvtx_to_gvtx(lvtx,nbrid,graph->dist);
          } else {
            lvtx = gvtx_to_lvtx(k,graph->dist);
            nbrid = gvtx_to_tid(k,graph->dist);
          }
          /* from here k is a global vertex number */
          if (gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
            /* update priorities of neighboring vertices */
            gnbrinfo[nbrid][lvtx].con[side] += gvwgt[myid][v];
            /* modify the gain/connectivity of other vertices in
             * separator */
            if (glocked[nbrid][lvtx] != (int)side) {
              /* demote this vertex in the priority queue */
              gain = gvwgt[nbrid][lvtx] - \
                  gnbrinfo[nbrid][lvtx].con[side];
              vw_pq_update(gain,k,q[other]);
            }
            /* we do not need to update the 'side' priority queue, as the
             * gain associated with moving 'k' to 'side' remains the same */
          } else if (gwhere[nbrid][lvtx] == other) {
            /* pull this vertex into the separator */
            DL_ASSERT_EQUALS(vtx_iset_contains(lvtx,gbnd[nbrid]),0, \
                "%d");
            /* record vertex being pulled into the separator */
            pulled[npulled++] = k;
            /* actually move the vertex */
            vtx_iset_add(lvtx,gbnd[nbrid]);
            gwhere[nbrid][lvtx] = MTMETIS_VSEP_SEP;
            /* calculate the connectivity */
            wgt_set(gnbrinfo[nbrid][lvtx].con,0,2);
            for (l=gxadj[nbrid][lvtx];l<gxadj[nbrid][lvtx+1];++l) {
              m = gadjncy[nbrid][l];
              if (m < gmynvtxs[nbrid]) {
                olvtx = m;
                onbrid = nbrid;
              } else {
                olvtx = gvtx_to_lvtx(m,graph->dist);
                onbrid = gvtx_to_tid(m,graph->dist);
              }
              p = gwhere[onbrid][olvtx];
              if (p < MTMETIS_VSEP_SEP) {
                gnbrinfo[nbrid][lvtx].con[p] += gvwgt[onbrid][olvtx];
              }
            }
            /* update the partition weights */
            pwgts[other] -= gvwgt[nbrid][lvtx];
            pwgts[MTMETIS_VSEP_SEP] += gvwgt[nbrid][lvtx];
            /* add the vertex to the priority queue for further movement */
            if (glocked[nbrid][lvtx] != (int)other) {
              gain = gvwgt[nbrid][lvtx] - \
                  gnbrinfo[nbrid][lvtx].con[other];
              vw_pq_push(gain,k,q[side]);
            }
            if (glocked[nbrid][lvtx] != (int)side) {
              gain = gvwgt[nbrid][lvtx] - \
                  gnbrinfo[nbrid][lvtx].con[side];
              vw_pq_push(gain,k,q[other]);
            }
            /* update neighbors of the vertices pulled into the separator */
            for (l=gxadj[nbrid][lvtx];l<gxadj[nbrid][lvtx+1];++l) {
              m = gadjncy[nbrid][l];
              if (m < gmynvtxs[nbrid]) {
                /* local vertex */
                olvtx = m;
                onbrid = nbrid;
                m = lvtx_to_gvtx(olvtx,onbrid,graph->dist);
              } else {
                /* remote vertex */
                olvtx = gvtx_to_lvtx(m,graph->dist);
                onbrid = gvtx_to_tid(m,graph->dist);
              }
              if (gwhere[onbrid][olvtx] == MTMETIS_VSEP_SEP) {
                /* update connectivity */
                gnbrinfo[onbrid][olvtx].con[other] -= gvwgt[nbrid][lvtx];
                /* this vertex is in the separator, and is now more likely
                 * to move. */
                if (glocked[onbrid][olvtx] != (int)other) {
                  /* update the value for moving this vertex in the same
                   * direction */
                  gain = gvwgt[onbrid][olvtx] - \
                      gnbrinfo[onbrid][olvtx].con[other];
                  DL_ASSERT(vw_pq_contains(m,q[side]), \
                      "Vertex %"PF_VTX_T" not in queue for side %" \
                      PF_PID_T"\n",olvtx,side);
                  vw_pq_update(gain,m,q[side]);
                }
              }
            }
          }
        }
        /* mark the number of vertices I pulled into the boundary */
        pullmk[nmoves+1] = npulled;
        DL_ASSERT_EQUALS(pwgts[MTMETIS_VSEP_SEP],cursep,"%"PF_WGT_T);
      }

      dprintf("FM: Pass %zu finished, sep = %"PF_WGT_T", rolling back %" \
          PF_VTX_T"/%"PF_VTX_T" moves: %"PF_WGT_T":%"PF_WGT_T"\n",pass, \
          pwgts[MTMETIS_VSEP_SEP],nmoves-minmove,nmoves, \
          pwgts[MTMETIS_VSEP_PARTA],pwgts[MTMETIS_VSEP_PARTB]);

      /* rollback until we are back at the maximum state -- moves must be
       * undone in reverse of the order in which they were made */
      while (nmoves > minmove) {
        g = moves[nmoves];
        v = gvtx_to_lvtx(g,graph->dist);
        myid = gvtx_to_tid(g,graph->dist);
        side = gwhere[myid][v];
        other = side ^ 0x01;
        DL_ASSERT(side != MTMETIS_VSEP_SEP,"ATtempting to unmove vertex %" \
            PF_VTX_T" in separator\n",v);
        /* unmove this vertex */
        pwgts[MTMETIS_VSEP_SEP] += gvwgt[myid][v];
        pwgts[side] -= gvwgt[myid][v];
        gwhere[myid][v] = MTMETIS_VSEP_SEP;
        vtx_iset_add(v,gbnd[myid]);
        /* calculate the connectivity */
        wgt_set(gnbrinfo[myid][v].con,0,2);
        for (j=gxadj[myid][v];j<gxadj[myid][v+1];++j) {
          k = gadjncy[myid][j];
          if (k < gmynvtxs[myid]) {
            lvtx = k;
            nbrid = myid;
          } else {
            lvtx = gvtx_to_lvtx(k,graph->dist);
            nbrid = gvtx_to_tid(k,graph->dist);
          }
          p = gwhere[nbrid][lvtx];
          /* adjust priorities of neighboring vertices */
          if (p == MTMETIS_VSEP_SEP) {
            gnbrinfo[nbrid][lvtx].con[side] -= gvwgt[myid][v];
          } else {
            gnbrinfo[myid][v].con[p] += gvwgt[nbrid][lvtx];
          }
        }
        /* push nodes back out of the separator */
        for (i=pullmk[nmoves];i<pullmk[nmoves+1];++i) {
          k = pulled[i];
          lvtx = gvtx_to_lvtx(k,graph->dist);
          nbrid = gvtx_to_tid(k,graph->dist);
          DL_ASSERT_EQUALS(gwhere[nbrid][lvtx],MTMETIS_VSEP_SEP,"%"PF_PID_T);
          /* move the vertex */
          gwhere[nbrid][lvtx] = other;
          /* adjust partition weights */
          pwgts[other] += gvwgt[nbrid][lvtx];
          pwgts[MTMETIS_VSEP_SEP] -= gvwgt[nbrid][lvtx];
          /* remove the vertex from the boundary */
          vtx_iset_remove(lvtx,gbnd[nbrid]);
          /* update neighbor-neighbor connectivity */
          for (l=gxadj[nbrid][lvtx];l<gxadj[nbrid][lvtx+1];++l) {
            m = gadjncy[nbrid][l];
            if (m < gmynvtxs[nbrid]) {
              /* local vertex */
              olvtx = m;
              onbrid = nbrid;
              m = lvtx_to_gvtx(olvtx,onbrid,graph->dist);
            } else {
              /* remote vertex */
              olvtx = gvtx_to_lvtx(m,graph->dist);
              onbrid = gvtx_to_tid(m,graph->dist);
            }
            if (gwhere[onbrid][olvtx] == MTMETIS_VSEP_SEP) {
              gnbrinfo[onbrid][olvtx].con[other] += gvwgt[nbrid][lvtx];
            }
          }
        }
        /* go to the next move */
        --nmoves;
      }
      DL_ASSERT_EQUALS(minsep,pwgts[MTMETIS_VSEP_SEP],"%"PF_WGT_T);
      graph->minsep = minsep;
      totalmoves += nmoves;
    }
    /* all threads agree on the global move count before deciding to stop */
    totalmoves = vtx_dlthread_sumreduce(totalmoves,ctrl->comm);
    if (totalmoves == 0) {
      /* exit the refinement pass if we do not make any moves */
      break;
    }
    ntotalmoves += totalmoves;
  }

  /* re-fetch my real thread id before cleanup (it was clobbered above) */
  myid = dlthread_get_id(ctrl->comm);
  if (myid == 0) {
    dl_free(moves);
    dl_free(pulled);
    dl_free(pullmk);
    vw_pq_free(q[MTMETIS_VSEP_PARTA]);
    vw_pq_free(q[MTMETIS_VSEP_PARTB]);
  }

  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)gwhere), \
      "Bad vsinfo after refinement");
  DL_ASSERT(check_vsbnd(gbnd[myid],graph),"Bad boundary after " \
      "refinement");

  dl_free(glocked[myid]);
  dlthread_free_shmem(glocked,ctrl->comm);

  return ntotalmoves;
}
/* S_vseprefine_FM1S()
 *
 * Serial Fiduccia-Mattheyses refinement of a vertex separator. All threads
 * publish their per-thread neighbor-info and boundary sets into shared
 * memory, but only thread 0 performs the actual refinement: for each pass it
 * greedily moves separator vertices into one side (alternating which side
 * each pass, starting with the lighter one), pulling the opposite-side
 * neighbors of each moved vertex into the separator, and finally rolls back
 * to the best (minimum separator weight, tie-broken by balance) state seen.
 *
 * ctrl    - control structure (hillsize limit, thread communicator, etc.)
 * graph   - the distributed graph whose separator is refined
 * niter   - maximum number of refinement passes
 * vsinfo  - per-thread separator refinement info (nbrinfo + boundary set)
 * maxpwgt - maximum allowed weight of either half
 *
 * Returns the total number of moves made (summed over all passes).
 */
static vtx_type S_vseprefine_FM1S(
ctrl_type * const ctrl,
graph_type * const graph,
size_t const niter,
vsinfo_type * const vsinfo,
wgt_type const maxpwgt)
{
vtx_type i, k, g, v, m, nmoves, lvtx, olvtx, minmove, totalmoves, ntotalmoves;
adj_type j, npulled, l;
wgt_type minsep, newbal, minbal, gain, cursep;
pid_type side, other, o, d, me;
tid_type nbrid, onbrid, myid;
vtx_type * moves, * pulled, * pullmk;
vsnbrinfo_type * myrinfo;
size_t pass;
vw_pq_t * q;
vsnbrinfo_type ** gnbrinfo;
vtx_iset_t ** gbnd;
tid_type const nthreads = graph->dist.nthreads;
vtx_type const nvtxs = graph->nvtxs;
vtx_type const * const gmynvtxs = graph->mynvtxs;
adj_type const * const * const gxadj = (adj_type const **)graph->xadj;
vtx_type const * const * const gadjncy = (vtx_type const **)graph->adjncy;
wgt_type const * const * const gvwgt = (wgt_type const **)graph->vwgt;
wgt_type * const pwgts = graph->pwgts;
pid_type * const * const gwhere = graph->where;
vtx_type const limit = ctrl->hillsize;
DL_ASSERT_EQUALS(nthreads,graph->dist.nthreads,"%"PF_TID_T);
myid = dlthread_get_id(ctrl->comm);
/* share per-thread nbrinfo and boundary pointers so thread 0 can see all */
gnbrinfo = dlthread_get_shmem((sizeof(vsnbrinfo_type*)*nthreads) + \
(sizeof(vtx_iset_t*)*nthreads),ctrl->comm);
gbnd = (vtx_iset_t**)(gnbrinfo+nthreads);
gnbrinfo[myid] = vsinfo->nbrinfo;
gbnd[myid] = vsinfo->bnd;
/* allocate stuff only needed by master thread */
if (myid == 0) {
moves = vtx_alloc(nvtxs+1); /* we start at 1 not 0 */
pullmk = vtx_alloc(nvtxs+2);
pulled = vtx_alloc(nvtxs*3);
/* setup priority queues */
q = vw_pq_create(0,graph->gnvtxs);
} else {
/* suppress compiler warnings */
moves = NULL;
pullmk = NULL;
pulled = NULL;
q = NULL;
}
ntotalmoves = 0;
for (pass=0;pass<niter;++pass) {
/* The overall algorithm for a refinement pass looks as follows:
 * -Greedily select balanced moves to make, ensuring balance is maintained.
 * -Track the maximum objective state, and return to it at the end of each
 * pass.
 */
/* re-fetch the thread id (it is clobbered below) and synchronize */
myid = dlthread_get_id(ctrl->comm);
totalmoves = 0;
dlthread_barrier(ctrl->comm);
/* add boundary vertices to the queue */
if (myid == 0) {
/* start moving vertices into the lighter half (ties alternate by level) */
if (pwgts[0] > pwgts[1]) {
o = 1;
} else if (pwgts[0] < pwgts[1]) {
o = 0;
} else {
o = graph->level % 2;
}
for (d=0;d<2;++d) {
dprintf("FM1S: Pass started %zu with sep = %"PF_WGT_T \
", %"PF_WGT_T":%"PF_WGT_T"\n", pass, \
pwgts[MTMETIS_VSEP_SEP], pwgts[MTMETIS_VSEP_PARTA], \
pwgts[MTMETIS_VSEP_PARTB]);
side = (d+o) % 2;
other = side ^ 0x01;
/* initial tracking variables */
nmoves = 0;
npulled = 0;
pullmk[1] = 0;
minmove = 0;
minbal = wgt_abs_diff(pwgts[0],pwgts[1]);
vw_pq_clear(q);
/* seed the queue with every thread's separator (boundary) vertices;
 * NOTE: myid is reused as a loop index here */
for (myid=0;myid<nthreads;++myid) {
for (i=0;i<gbnd[myid]->size;++i) {
v = vtx_iset_get(i,gbnd[myid]);
DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
myrinfo = gnbrinfo[myid] + v;
g = lvtx_to_gvtx(v,myid,graph->dist);
/* gain = weight leaving the separator minus weight pulled in */
gain = gvwgt[myid][v] - myrinfo->con[other];
vw_pq_push(gain,g,q);
}
}
cursep = minsep = graph->minsep;
/* make possible moves */
while (nmoves < nvtxs && q->size > 0) {
g = vw_pq_pop(q);
v = gvtx_to_lvtx(g,graph->dist);
myid = gvtx_to_tid(g,graph->dist);
DL_ASSERT_EQUALS(MTMETIS_VSEP_SEP,gwhere[myid][v],"%"PF_PID_T);
DL_ASSERT_EQUALS(vtx_iset_contains(v,gbnd[myid]),1,"%d");
/* make sure we're not overweight */
if (pwgts[side] >= maxpwgt) {
break;
}
if (pwgts[side] + gvwgt[myid][v] > maxpwgt) {
continue;
}
/* make sure we have space to record the vertices added to the
 * separator */
if (npulled + gxadj[myid][v+1] - gxadj[myid][v] >= 2*nvtxs-1) {
/* roll back to our best state */
break;
}
/* update our minimum objective value or check to make sure we
 * haven't passed the search limit */
cursep = cursep - (gvwgt[myid][v]-gnbrinfo[myid][v].con[other]);
newbal = wgt_abs_diff(pwgts[side]+gvwgt[myid][v], \
pwgts[other]-gnbrinfo[myid][v].con[other]);
if (cursep < minsep || \
(cursep == minsep && newbal < minbal)) {
minsep = cursep;
minmove = nmoves+1;
/* we only need to abs this here, as if its negative, it means the
 * move increases the balance */
minbal = newbal;
} else {
/* hill-climbing limits: give up after too many non-improving moves */
if (nmoves-minmove+1 > 3*limit ||
(nmoves-minmove+1 > limit && cursep > minsep*1.1)) {
/* revert back to best cut */
break;
}
}
/* Once we have selected a vertex to move, we need to update several
 * things:
 * -the partition and separator weights
 * -pull the neighboring vertices in 'other' into the separator
 * -update the priorities of the affected vertices
 */
/* at this point, we have decided to make the move */
myrinfo = gnbrinfo[myid] + v;
gwhere[myid][v] = side;
moves[++nmoves] = g; /* count one up */
/* remove the vertex from the boundary -- and opposing pq */
vtx_iset_remove(v,gbnd[myid]);
/* adjust partition weights */
pwgts[side] += gvwgt[myid][v];
pwgts[MTMETIS_VSEP_SEP] -= gvwgt[myid][v];
/* process edges */
for (j=gxadj[myid][v];j<gxadj[myid][v+1];++j) {
k = gadjncy[myid][j];
if (k < gmynvtxs[myid]) {
lvtx = k;
nbrid = myid;
k = lvtx_to_gvtx(lvtx,nbrid,graph->dist);
} else {
lvtx = gvtx_to_lvtx(k,graph->dist);
nbrid = gvtx_to_tid(k,graph->dist);
}
/* from here k is a global vertex number */
if (gwhere[nbrid][lvtx] == MTMETIS_VSEP_SEP) {
/* update priorities of neighboring vertices */
gnbrinfo[nbrid][lvtx].con[side] += gvwgt[myid][v];
/* we do not need to update the 'side' priority queue, as the
 * gain associated with moving 'k' to 'side' remains the same */
} else if (gwhere[nbrid][lvtx] == other) {
/* pull this vertex into the separator */
DL_ASSERT_EQUALS(vtx_iset_contains(lvtx,gbnd[nbrid]),0, \
"%d");
/* record vertex being pulled into the separator */
pulled[npulled++] = k;
/* actually move the vertex */
vtx_iset_add(lvtx,gbnd[nbrid]);
gwhere[nbrid][lvtx] = MTMETIS_VSEP_SEP;
/* calculate the connectivity */
S_calc_conn(lvtx,nbrid,gmynvtxs[nbrid],gxadj[nbrid], \
gadjncy[nbrid],gvwgt,(pid_type const **)gwhere,graph->dist, \
gnbrinfo[nbrid][lvtx].con);
/* update the partition weights */
pwgts[other] -= gvwgt[nbrid][lvtx];
pwgts[MTMETIS_VSEP_SEP] += gvwgt[nbrid][lvtx];
/* add the vertex to the priority queue for further movement */
gain = gvwgt[nbrid][lvtx] - \
gnbrinfo[nbrid][lvtx].con[other];
vw_pq_push(gain,k,q);
/* update neighbors of the vertices pulled into the separator */
for (l=gxadj[nbrid][lvtx];l<gxadj[nbrid][lvtx+1];++l) {
m = gadjncy[nbrid][l];
if (m < gmynvtxs[nbrid]) {
/* local vertex */
olvtx = m;
onbrid = nbrid;
m = lvtx_to_gvtx(olvtx,onbrid,graph->dist);
} else {
/* remote vertex */
olvtx = gvtx_to_lvtx(m,graph->dist);
onbrid = gvtx_to_tid(m,graph->dist);
}
if (gwhere[onbrid][olvtx] == MTMETIS_VSEP_SEP) {
/* update connectivity */
gnbrinfo[onbrid][olvtx].con[other] -= gvwgt[nbrid][lvtx];
/* this vertex is in the separator, and is now more likely
 * to move. */
/* update the value for moving this vertex in the same
 * direction */
if (vw_pq_contains(m,q)) {
gain = gvwgt[onbrid][olvtx] - \
gnbrinfo[onbrid][olvtx].con[other];
vw_pq_update(gain,m,q);
}
}
}
}
}
/* mark the number of vertices I pulled into the boundary */
pullmk[nmoves+1] = npulled;
DL_ASSERT_EQUALS(pwgts[MTMETIS_VSEP_SEP],cursep,"%"PF_WGT_T);
}
dprintf("FM1S: Pass %zu finished, sep = %"PF_WGT_T", rolling back %" \
PF_VTX_T"/%"PF_VTX_T" moves: %"PF_WGT_T":%"PF_WGT_T"\n",pass, \
pwgts[MTMETIS_VSEP_SEP],nmoves-minmove,nmoves, \
pwgts[MTMETIS_VSEP_PARTA],pwgts[MTMETIS_VSEP_PARTB]);
/* rollback until we are back at the maximum state -- moves must be
 * undone in reverse of the order in which they were made */
while (nmoves > minmove) {
g = moves[nmoves];
v = gvtx_to_lvtx(g,graph->dist);
myid = gvtx_to_tid(g,graph->dist);
DL_ASSERT(side != MTMETIS_VSEP_SEP,"Attempting to unmove vertex %" \
PF_VTX_T" in separator\n",v);
/* unmove this vertex */
pwgts[MTMETIS_VSEP_SEP] += gvwgt[myid][v];
pwgts[side] -= gvwgt[myid][v];
gwhere[myid][v] = MTMETIS_VSEP_SEP;
vtx_iset_add(v,gbnd[myid]);
/* calculate the connectivity */
S_calc_conn(v,myid,gmynvtxs[myid],gxadj[myid],gadjncy[myid],gvwgt, \
(pid_type const **)gwhere,graph->dist, \
gnbrinfo[myid][v].con);
/* adjust priorities of neighboring vertices */
for (j=gxadj[myid][v];j<gxadj[myid][v+1];++j) {
k = gadjncy[myid][j];
if (k < gmynvtxs[myid]) {
lvtx = k;
nbrid = myid;
} else {
lvtx = gvtx_to_lvtx(k,graph->dist);
nbrid = gvtx_to_tid(k,graph->dist);
}
me = gwhere[nbrid][lvtx];
if (me == MTMETIS_VSEP_SEP) {
gnbrinfo[nbrid][lvtx].con[side] -= gvwgt[myid][v];
}
}
/* push nodes back out of the separator -- pullmk[nmoves]..pullmk[nmoves+1]
 * delimits the vertices pulled in by move 'nmoves' */
for (i=pullmk[nmoves];i<pullmk[nmoves+1];++i) {
k = pulled[i];
lvtx = gvtx_to_lvtx(k,graph->dist);
nbrid = gvtx_to_tid(k,graph->dist);
DL_ASSERT_EQUALS(gwhere[nbrid][lvtx],MTMETIS_VSEP_SEP,"%"PF_PID_T);
/* move the vertex */
gwhere[nbrid][lvtx] = other;
/* adjust partition weights */
pwgts[other] += gvwgt[nbrid][lvtx];
pwgts[MTMETIS_VSEP_SEP] -= gvwgt[nbrid][lvtx];
/* remove the vertex from the boundary */
vtx_iset_remove(lvtx,gbnd[nbrid]);
/* update neighbor-neighbor connectivity */
for (l=gxadj[nbrid][lvtx];l<gxadj[nbrid][lvtx+1];++l) {
m = gadjncy[nbrid][l];
if (m < gmynvtxs[nbrid]) {
/* local vertex */
olvtx = m;
onbrid = nbrid;
m = lvtx_to_gvtx(olvtx,onbrid,graph->dist);
} else {
/* remote vertex */
olvtx = gvtx_to_lvtx(m,graph->dist);
onbrid = gvtx_to_tid(m,graph->dist);
}
me = gwhere[onbrid][olvtx];
if (me == MTMETIS_VSEP_SEP) {
gnbrinfo[onbrid][olvtx].con[other] += gvwgt[nbrid][lvtx];
}
}
}
/* go to the next move */
--nmoves;
}
DL_ASSERT_EQUALS(minsep,pwgts[MTMETIS_VSEP_SEP],"%"PF_WGT_T);
dprintf("FM1S: Pass %zu rolled back to sep = %"PF_WGT_T \
", %"PF_WGT_T":%"PF_WGT_T"\n", pass, \
pwgts[MTMETIS_VSEP_SEP], pwgts[MTMETIS_VSEP_PARTA], \
pwgts[MTMETIS_VSEP_PARTB]);
graph->minsep = minsep;
totalmoves += nmoves;
}
}
totalmoves = vtx_dlthread_sumreduce(totalmoves,ctrl->comm);
if (totalmoves == 0) {
/* exit the refinement pass if we do not make any moves */
break;
}
ntotalmoves += totalmoves;
}
/* restore this thread's real id (clobbered during refinement above) */
myid = dlthread_get_id(ctrl->comm);
if (myid == 0) {
dl_free(moves);
dl_free(pulled);
dl_free(pullmk);
vw_pq_free(q);
}
DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)gwhere), \
"Bad vsinfo after refinement");
DL_ASSERT(check_vsbnd(gbnd[myid],graph),"Bad boundary after " \
"refinement");
dlthread_free_shmem(gnbrinfo,ctrl->comm);
return ntotalmoves;
}
/* S_vseprefine_SFM()
 *
 * Segmented FM refinement: each thread refines only its own portion of the
 * separator, with interface vertices (those with a remote neighbor) locked
 * to their current side so threads never conflict. Runs up to 'niter'
 * passes of S_pass_SFM1S, stopping early once a pass makes no moves.
 *
 * Returns the number of moves made by this thread.
 */
static vtx_type S_vseprefine_SFM(
    ctrl_type * const ctrl,
    graph_type * const graph,
    size_t const niter,
    vsinfo_type * const vsinfo,
    wgt_type const maxpwgt)
{
  vtx_type v, idx, total, passmoves, nborder;
  adj_type e;
  size_t pass;
  vtx_type * movebuf, * marks, * pullbuf, * border;
  vw_pq_t * queue;
  int * lockvec;

  tid_type const myid = dlthread_get_id(ctrl->comm);
  vtx_type const mynvtxs = graph->mynvtxs[myid];
  adj_type const * const xadj = graph->xadj[myid];
  vtx_type const * const adjncy = graph->adjncy[myid];

  /* working storage for the serial FM pass (move log starts at index 1,
   * and pull-marks index one past the last move) */
  lockvec = int_init_alloc(UNLOCKED,mynvtxs);
  movebuf = vtx_alloc(mynvtxs+1);
  marks = vtx_alloc(mynvtxs+2);
  pullbuf = vtx_alloc(mynvtxs*2);

  /* setup priority queues */
  queue = vw_pq_create(0,mynvtxs);

  border = vtx_alloc(mynvtxs);

  /* collect interface vertices: any vertex with at least one remote
   * (other-thread) neighbor */
  nborder = 0;
  for (v=0;v<mynvtxs;++v) {
    for (e=xadj[v];e<xadj[v+1];++e) {
      if (adjncy[e] >= mynvtxs) {
        border[nborder++] = v;
        break;
      }
    }
  }

  total = 0;
  for (pass=0;pass<niter;++pass) {
    /* pin every interface vertex to its current side for this pass */
    for (idx=0;idx<nborder;++idx) {
      v = border[idx];
      S_lock(v,lockvec,graph->where[myid][v]);
    }
    passmoves = S_pass_SFM1S(ctrl,graph,vsinfo,movebuf,marks,pullbuf,queue, \
        lockvec,border,nborder,maxpwgt);
    if (passmoves == 0) {
      break;
    }
    total += passmoves;
  }

  dl_free(movebuf);
  dl_free(pullbuf);
  dl_free(marks);
  dl_free(border);
  vw_pq_free(queue);

  if (myid == 0) {
    graph->minsep = graph->pwgts[MTMETIS_VSEP_SEP];
  }

  dl_free(lockvec);

  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)graph->where), \
      "Bad vsinfo after refinement");
  DL_ASSERT(check_vsbnd(vsinfo->bnd,graph),"Bad boundary after " \
      "refinement");

  dlthread_barrier(ctrl->comm);

  return total;
}
/* S_vseprefine_GREEDY()
 *
 * Parallel greedy separator refinement: repeatedly runs S_pass_GREEDY
 * until a pass makes no moves or 'niter' passes have completed.
 *
 * Returns the number of moves accumulated across passes.
 */
static vtx_type S_vseprefine_GREEDY(
    ctrl_type * const ctrl,
    graph_type * const graph,
    size_t const niter,
    vsinfo_type * const vsinfo,
    wgt_type const maxpwgt)
{
  vtx_type passmoves, total, nborder, v, u;
  adj_type e;
  vtx_type * border;
  vsnbrinfo_type ** nbrinfos;
  size_t pass;
  vw_pq_t * queue;
  update_combuffer_t * combuffer;

  tid_type const myid = dlthread_get_id(ctrl->comm);
  tid_type const nthreads = dlthread_get_nthreads(ctrl->comm);
  vtx_type const mynvtxs = graph->mynvtxs[myid];
  adj_type const * const xadj = graph->xadj[myid];
  vtx_type const * const adjncy = graph->adjncy[myid];

  DL_ASSERT_EQUALS(nthreads,graph->dist.nthreads,"%"PF_TID_T);

  /* publish this thread's neighbor info so remote threads can read it */
  nbrinfos = dlthread_get_shmem(sizeof(vsnbrinfo_type*)*nthreads,ctrl->comm);
  nbrinfos[myid] = vsinfo->nbrinfo;

  queue = vw_pq_create(0,mynvtxs);
  border = vtx_alloc(mynvtxs);
  combuffer = update_combuffer_create(ctrl->comm);

  total = 0;

  /* wait until every thread has published its nbrinfo pointer */
  dlthread_barrier(ctrl->comm);

  /* record each vertex that has at least one remote neighbor */
  nborder = 0;
  for (v=0;v<mynvtxs;++v) {
    for (e=xadj[v];e<xadj[v+1];++e) {
      u = adjncy[e];
      if (u >= mynvtxs) {
        border[nborder++] = v;
        break;
      }
    }
  }

  for (pass=0;pass<niter;++pass) {
    passmoves = S_pass_GREEDY(ctrl,graph,vsinfo,nbrinfos,combuffer,queue, \
        border,nborder,maxpwgt,0);
    if (passmoves == 0) {
      /* exit the refinement pass if we do not make any moves */
      break;
    }
    total += passmoves;
  }

  if (myid == 0) {
    graph->minsep = graph->pwgts[MTMETIS_VSEP_SEP];
  }

  vw_pq_free(queue);
  dl_free(border);

#ifdef USE_ASSERTS
  dlthread_barrier(ctrl->comm);
#endif

  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)graph->where), \
      "Bad vsinfo after refinement");
  DL_ASSERT(check_vsbnd(vsinfo->bnd,graph),"Bad boundary after " \
      "refinement");

  dlthread_free_shmem(nbrinfos,ctrl->comm);
  update_combuffer_free(combuffer);

  return total;
}
/* S_vseprefine_SFG()
 *
 * Hybrid refinement: one parallel greedy pass (S_pass_GREEDY) followed by
 * one segmented FM pass (S_pass_SFM1S) with interface vertices locked.
 *
 * ctrl    - control structure (thread communicator, etc.)
 * graph   - the distributed graph whose separator is refined
 * niter   - unused here; SFG always performs a single greedy+FM cycle
 * vsinfo  - per-thread separator refinement info (nbrinfo + boundary set)
 * maxpwgt - maximum allowed weight of either half
 *
 * Returns the number of moves made by this thread.
 */
static vtx_type S_vseprefine_SFG(
    ctrl_type * const ctrl,
    graph_type * const graph,
    size_t const niter,
    vsinfo_type * const vsinfo,
    wgt_type const maxpwgt)
{
  vtx_type i, k, nmoves, niface;
  adj_type j;
  int * locked;
  vtx_type * moves, * pullmk, * pulled, * iface;
  vw_pq_t * q;
  update_combuffer_t * combuffer;
  vsnbrinfo_type ** gnbrinfo;

  tid_type const myid = dlthread_get_id(ctrl->comm);
  tid_type const nthreads = dlthread_get_nthreads(ctrl->comm);
  vtx_type const mynvtxs = graph->mynvtxs[myid];
  adj_type const * const xadj = graph->xadj[myid];
  vtx_type const * const adjncy = graph->adjncy[myid];

  DL_ASSERT_EQUALS(nthreads,graph->dist.nthreads,"%"PF_TID_T);

  /* publish this thread's neighbor info so remote threads can read it */
  gnbrinfo = dlthread_get_shmem(sizeof(vsnbrinfo_type*)*nthreads,ctrl->comm);
  gnbrinfo[myid] = vsinfo->nbrinfo;

  locked = int_init_alloc(UNLOCKED,mynvtxs);
  iface = vtx_alloc(mynvtxs);
  /* Size the FM buffers identically to S_vseprefine_SFM: S_pass_SFM1S
   * records its first move at index 1 (so 'moves' needs mynvtxs+1 slots)
   * and writes pullmk[nmoves+1] (so 'pullmk' needs mynvtxs+2 slots).
   * The previous sizes (mynvtxs and mynvtxs+1) were one element short. */
  moves = vtx_alloc(mynvtxs+1);
  pullmk = vtx_alloc(mynvtxs+2);
  pulled = vtx_alloc(mynvtxs*2);

  /* setup priority queues */
  q = vw_pq_create(0,mynvtxs);

  /* determine locked (interface) vertices: those with a remote neighbor */
  niface = 0;
  for (i=0;i<mynvtxs;++i) {
    for (j=xadj[i];j<xadj[i+1];++j) {
      k = adjncy[j];
      if (k >= mynvtxs) {
        iface[niface++] = i;
        break;
      }
    }
  }

  combuffer = update_combuffer_create(ctrl->comm);

  /* first a parallel greedy pass over the whole separator */
  nmoves = S_pass_GREEDY(ctrl,graph,vsinfo,gnbrinfo,combuffer,q, \
      iface,niface,maxpwgt,0);

  /* pin interface vertices to their current side for the FM pass */
  for (k=0;k<niface;++k) {
    i = iface[k];
    S_lock(i,locked,graph->where[myid][i]);
  }

  /* then a segmented FM pass on each thread's interior */
  nmoves += S_pass_SFM1S(ctrl,graph,vsinfo,moves,pullmk,pulled,q,locked, \
      iface,niface,maxpwgt);

  if (myid == 0) {
    graph->minsep = graph->pwgts[MTMETIS_VSEP_SEP];
  }

  dl_free(moves);
  dl_free(pulled);
  dl_free(pullmk);
  dl_free(locked);
  dl_free(iface);
  vw_pq_free(q);

#ifdef USE_ASSERTS
  dlthread_barrier(ctrl->comm);
#endif

  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)graph->where), \
      "Bad vsinfo after refinement");
  DL_ASSERT(check_vsbnd(vsinfo->bnd,graph),"Bad boundary after " \
      "refinement");

  dlthread_free_shmem(gnbrinfo,ctrl->comm);
  update_combuffer_free(combuffer);

  return nmoves;
}
/******************************************************************************
* PUBLIC FUNCTIONS ************************************************************
******************************************************************************/
/* par_vseprefine()
 *
 * Entry point for parallel vertex-separator refinement. Small graphs are
 * refined with serial FM; otherwise the strategy selected by ctrl->rtype
 * is used. Returns the number of vertices moved.
 */
vtx_type par_vseprefine(
    ctrl_type * const ctrl,
    graph_type * const graph,
    vsinfo_type * const vsinfo)
{
  vtx_type moved;

  wgt_type const avgvtxwgt = graph->tvwgt/graph->nvtxs;
  wgt_type * const pwgts = graph->pwgts;
  /* maximum weight allowed in either half, from the imbalance factor */
  wgt_type const maxpwgt = ctrl->ubfactor*(pwgts[0]+pwgts[1])*0.5;

  DL_ASSERT(check_separator(graph,(pid_type const **)graph->where), \
      "Bad separator before refinement");
  DL_ASSERT(check_vsinfo(vsinfo,graph,(pid_type const **)graph->where), \
      "Bad vsinfo before refinement");
  DL_ASSERT(check_vsbnd(vsinfo->bnd,graph),"Bad boundary before refinement");

  if (graph->nvtxs < SERIAL_FM_FACTOR*sqrt(graph->dist.nthreads)) {
    /* graph is too small to be worth refining in parallel */
    moved = S_vseprefine_FM1S(ctrl,graph,ctrl->nrefpass,vsinfo,maxpwgt);
  } else {
    /* disabled for now */
    if (0 && dl_max(pwgts[0],pwgts[1]) > maxpwgt*1.03 && \
        4*avgvtxwgt < wgt_abs_diff(pwgts[0],pwgts[1])) {
      S_pass_BAL(ctrl,graph,vsinfo,maxpwgt);
    }
    /* dispatch on the configured refinement type */
    if (ctrl->rtype == MTMETIS_RTYPE_GREEDY) {
      moved = S_vseprefine_GREEDY(ctrl,graph,ctrl->nrefpass,vsinfo,maxpwgt);
    } else if (ctrl->rtype == MTMETIS_RTYPE_FM) {
      moved = S_vseprefine_FM1S(ctrl,graph,ctrl->nrefpass,vsinfo,maxpwgt);
    } else if (ctrl->rtype == MTMETIS_RTYPE_SFM) {
      moved = S_vseprefine_SFM(ctrl,graph,ctrl->nrefpass,vsinfo,maxpwgt);
    } else if (ctrl->rtype == MTMETIS_RTYPE_SFG) {
      moved = S_vseprefine_SFG(ctrl,graph,ctrl->nrefpass,vsinfo,maxpwgt);
    } else {
      dl_error("Unknown refinement type '%d'\n",ctrl->rtype);
    }
  }

  DL_ASSERT_EQUALS(wgt_lsum(graph->pwgts,3),graph->tvwgt,"%"PF_TWGT_T);
  DL_ASSERT_EQUALS(graph->pwgts[2],graph->minsep,"%"PF_WGT_T);
  DL_ASSERT(check_separator(graph,(pid_type const **)graph->where), \
      "Bad separator after refinement");

  par_vprintf(ctrl->verbosity,MTMETIS_VERBOSITY_HIGH,"%zu) [%"PF_VTX_T" %" \
      PF_ADJ_T"] {%"PF_WGT_T" %"PF_WGT_T" %"PF_WGT_T" # %"PF_WGT_T" %" \
      PF_VTX_T"}\n",graph->level,graph->nvtxs,graph->nedges,pwgts[0], \
      pwgts[1],pwgts[2],maxpwgt,moved);

  return moved;
}
#endif
|
convolution_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive convolution from an unpacked (pack1) input blob to a pack4 output
// blob using MIPS MSA intrinsics: each output position accumulates
// 4 output channels at once in a v4f32 register.
// Assumes top_blob is pre-sized to the convolved output dimensions and
// weight_data_pack1ton is laid out as [outch/4][channels][maxk][4]
// -- TODO confirm weight layout against the packing code.
static void convolution_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1ton, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int maxk = kernel_w * kernel_h;
// kernel offsets: flat offsets (in floats) from a window's top-left sample
// to each of the maxk dilated tap positions within an input channel
std::vector<int> _space_ofs(maxk);
int* space_ofs = &_space_ofs[0];
{
int p1 = 0;
int p2 = 0;
// gap skips from the end of one kernel row to the start of the next
int gap = w * dilation_h - kernel_w * dilation_w;
for (int i = 0; i < kernel_h; i++)
{
for (int j = 0; j < kernel_w; j++)
{
space_ofs[p1] = p2;
p1++;
p2 += dilation_w;
}
p2 += gap;
}
}
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// accumulator for 4 output channels; start from the bias if present
v4f32 _sum = (v4f32)__msa_fill_w(0);
if (bias_data_ptr)
{
_sum = (v4f32)__msa_ld_w(bias_data_ptr + p * 4, 0);
}
// weights for this group of 4 output channels
const float* kptr = (const float*)weight_data_pack1ton + maxk * channels * p * 4;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
// top-left input sample of the receptive field for (i,j)
const float* sptr = m.row(i * stride_h) + j * stride_w;
for (int k = 0; k < maxk; k++) // 29.23
{
// broadcast one input scalar, multiply by 4 weights, accumulate
v4f32 _val = __msa_fill_w_f32(sptr[space_ofs[k]]);
v4f32 _w = (v4f32)__msa_ld_w(kptr, 0);
_sum = __msa_fmadd_w(_sum, _val, _w);
kptr += 4;
}
}
_sum = activation_ps(_sum, activation_type, activation_params);
__msa_st_w((v4i32)_sum, outptr + j * 4, 0);
}
outptr += outw * 4;
}
}
}
|
PerturbField.c | // Re-write of perturb_field.c for being accessible within the MCMC
int ComputePerturbField(
float redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
struct InitialConditions *boxes, struct PerturbedField *perturbed_field
){
/*
ComputePerturbField uses the first-order Langragian displacement field to move the
masses in the cells of the density field. The high-res density field is extrapolated
to some high-redshift (global_params.INITIAL_REDSHIFT), then uses the zeldovich
approximation to move the grid "particles" onto the lower-res grid we use for the
maps. Then we recalculate the velocity fields on the perturbed grid.
*/
int status;
Try{ // This Try{} wraps the whole function, so we don't indent.
// Makes the parameter structs visible to a variety of functions/macros
// Do each time to avoid Python garbage collection issues
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
omp_set_num_threads(user_params->N_THREADS);
fftwf_complex *HIRES_density_perturb, *HIRES_density_perturb_saved;
fftwf_complex *LOWRES_density_perturb, *LOWRES_density_perturb_saved;
float growth_factor, displacement_factor_2LPT, init_growth_factor, init_displacement_factor_2LPT, xf, yf, zf;
float mass_factor, dDdt, f_pixel_factor, velocity_displacement_factor, velocity_displacement_factor_2LPT;
unsigned long long ct, HII_i, HII_j, HII_k;
int i,j,k, xi, yi, zi, dimension, switch_mid;
double ave_delta, new_ave_delta;
// Function for deciding the dimensions of loops when we could
// use either the low or high resolution grids.
switch(user_params->PERTURB_ON_HIGH_RES) {
case 0:
dimension = user_params->HII_DIM;
switch_mid = HII_MIDDLE;
break;
case 1:
dimension = user_params->DIM;
switch_mid = MIDDLE;
break;
}
// *************** BEGIN INITIALIZATION ************************** //
// perform a very rudimentary check to see if we are underresolved and not using the linear approx
if ((user_params->BOX_LEN > user_params->DIM) && !(global_params.EVOLVE_DENSITY_LINEARLY)){
LOG_WARNING("Resolution is likely too low for accurate evolved density fields\n \
It is recommended that you either increase the resolution (DIM/Box_LEN) or set the EVOLVE_DENSITY_LINEARLY flag to 1\n");
}
growth_factor = dicke(redshift);
displacement_factor_2LPT = -(3.0/7.0) * growth_factor*growth_factor; // 2LPT eq. D8
dDdt = ddickedt(redshift); // time derivative of the growth factor (1/s)
init_growth_factor = dicke(global_params.INITIAL_REDSHIFT);
init_displacement_factor_2LPT = -(3.0/7.0) * init_growth_factor*init_growth_factor; // 2LPT eq. D8
// find factor of HII pixel size / deltax pixel size
f_pixel_factor = user_params->DIM/(float)(user_params->HII_DIM);
mass_factor = pow(f_pixel_factor, 3);
// allocate memory for the updated density, and initialize
LOWRES_density_perturb = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
LOWRES_density_perturb_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
if(user_params->PERTURB_ON_HIGH_RES) {
HIRES_density_perturb = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
HIRES_density_perturb_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
}
double *resampled_box;
// check if the linear evolution flag was set
if (global_params.EVOLVE_DENSITY_LINEARLY){
LOG_DEBUG("Linearly evolve density field");
#pragma omp parallel shared(growth_factor,boxes,LOWRES_density_perturb,HIRES_density_perturb,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
*((float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k)) = growth_factor*boxes->hires_density[R_INDEX(i,j,k)];
}
else {
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = growth_factor*boxes->lowres_density[HII_R_INDEX(i,j,k)];
}
}
}
}
}
}
else {
// Apply Zel'dovich/2LPT correction
LOG_DEBUG("Apply Zel'dovich");
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
*((float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k)) = 0.;
}
else {
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = 0.;
}
}
}
}
}
velocity_displacement_factor = (growth_factor-init_growth_factor) / user_params->BOX_LEN;
// now add the missing factor of D
#pragma omp parallel shared(boxes,velocity_displacement_factor,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->hires_vy[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->hires_vz[R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
}
else {
boxes->lowres_vx[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->lowres_vy[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
boxes->lowres_vz[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor; // this is now comoving displacement in units of box size
}
}
}
}
}
// * ************************************************************************* * //
// * BEGIN 2LPT PART * //
// * ************************************************************************* * //
// reference: reference: Scoccimarro R., 1998, MNRAS, 299, 1097-1118 Appendix D
if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
LOG_DEBUG("Apply 2LPT");
// allocate memory for the velocity boxes and read them in
velocity_displacement_factor_2LPT = (displacement_factor_2LPT - init_displacement_factor_2LPT) / user_params->BOX_LEN;
// now add the missing factor in eq. D9
#pragma omp parallel shared(boxes,velocity_displacement_factor_2LPT,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->hires_vy_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->hires_vz_2LPT[R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
}
else {
boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] *= velocity_displacement_factor_2LPT; // this is now comoving displacement in units of box size
}
}
}
}
}
}
// * ************************************************************************* * //
// * END 2LPT PART * //
// * ************************************************************************* * //
// ************ END INITIALIZATION **************************** //
// Perturbing the density field required adding over multiple cells. Store intermediate result as a double to avoid rounding errors
if(user_params->PERTURB_ON_HIGH_RES) {
resampled_box = (double *)calloc(TOT_NUM_PIXELS,sizeof(double));
}
else {
resampled_box = (double *)calloc(HII_TOT_NUM_PIXELS,sizeof(double));
}
// go through the high-res box, mapping the mass onto the low-res (updated) box
LOG_DEBUG("Perturb the density field");
#pragma omp parallel shared(init_growth_factor,boxes,f_pixel_factor,resampled_box,dimension) \
private(i,j,k,xi,xf,yi,yf,zi,zf,HII_i,HII_j,HII_k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->DIM;i++){
for (j=0; j<user_params->DIM;j++){
for (k=0; k<user_params->DIM;k++){
// map indeces to locations in units of box size
xf = (i+0.5)/((user_params->DIM)+0.0);
yf = (j+0.5)/((user_params->DIM)+0.0);
zf = (k+0.5)/((user_params->DIM)+0.0);
// update locations
if(user_params->PERTURB_ON_HIGH_RES) {
xf += (boxes->hires_vx)[R_INDEX(i, j, k)];
yf += (boxes->hires_vy)[R_INDEX(i, j, k)];
zf += (boxes->hires_vz)[R_INDEX(i, j, k)];
}
else {
HII_i = (unsigned long long)(i/f_pixel_factor);
HII_j = (unsigned long long)(j/f_pixel_factor);
HII_k = (unsigned long long)(k/f_pixel_factor);
xf += (boxes->lowres_vx)[HII_R_INDEX(HII_i, HII_j, HII_k)];
yf += (boxes->lowres_vy)[HII_R_INDEX(HII_i, HII_j, HII_k)];
zf += (boxes->lowres_vz)[HII_R_INDEX(HII_i, HII_j, HII_k)];
}
// 2LPT PART
// add second order corrections
if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
if(user_params->PERTURB_ON_HIGH_RES) {
xf -= (boxes->hires_vx_2LPT)[R_INDEX(i,j,k)];
yf -= (boxes->hires_vy_2LPT)[R_INDEX(i,j,k)];
zf -= (boxes->hires_vz_2LPT)[R_INDEX(i,j,k)];
}
else {
xf -= (boxes->lowres_vx_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
yf -= (boxes->lowres_vy_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
zf -= (boxes->lowres_vz_2LPT)[HII_R_INDEX(HII_i,HII_j,HII_k)];
}
}
xf *= (float)(dimension);
yf *= (float)(dimension);
zf *= (float)(dimension);
while (xf >= (float)(dimension)){ xf -= (dimension);}
while (xf < 0){ xf += (dimension);}
while (yf >= (float)(dimension)){ yf -= (dimension);}
while (yf < 0){ yf += (dimension);}
while (zf >= (float)(dimension)){ zf -= (dimension);}
while (zf < 0){ zf += (dimension);}
xi = xf;
yi = yf;
zi = zf;
if (xi >= (dimension)){ xi -= (dimension);}
if (xi < 0) {xi += (dimension);}
if (yi >= (dimension)){ yi -= (dimension);}
if (yi < 0) {yi += (dimension);}
if (zi >= (dimension)){ zi -= (dimension);}
if (zi < 0) {zi += (dimension);}
if(user_params->PERTURB_ON_HIGH_RES) {
#pragma omp atomic
resampled_box[R_INDEX(xi,yi,zi)] += (double)(1. + init_growth_factor*(boxes->hires_density)[R_INDEX(i,j,k)]);
}
else {
#pragma omp atomic
resampled_box[HII_R_INDEX(xi,yi,zi)] += (double)(1. + init_growth_factor*(boxes->hires_density)[R_INDEX(i,j,k)]);
}
}
}
}
}
// Resample back to a float for remaining algorithm
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,resampled_box,dimension) \
private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
*( (float *)HIRES_density_perturb + R_FFT_INDEX(i,j,k) ) = (float)resampled_box[R_INDEX(i,j,k)];
}
else {
*( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) = (float)resampled_box[HII_R_INDEX(i,j,k)];
}
}
}
}
}
free(resampled_box);
LOG_DEBUG("Finished perturbing the density field");
// deallocate
#pragma omp parallel shared(boxes,velocity_displacement_factor,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->hires_vy[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->hires_vz[R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
}
else {
boxes->lowres_vx[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->lowres_vy[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
boxes->lowres_vz[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor; // convert back to z = 0 quantity
}
}
}
}
}
if(global_params.SECOND_ORDER_LPT_CORRECTIONS){
#pragma omp parallel shared(boxes,velocity_displacement_factor_2LPT,dimension) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<dimension; i++){
for (j=0; j<dimension; j++){
for (k=0; k<dimension; k++){
if(user_params->PERTURB_ON_HIGH_RES) {
boxes->hires_vx_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->hires_vy_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->hires_vz_2LPT[R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
}
else {
boxes->lowres_vx_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->lowres_vy_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
boxes->lowres_vz_2LPT[HII_R_INDEX(i,j,k)] /= velocity_displacement_factor_2LPT; // convert back to z = 0 quantity
}
}
}
}
}
}
LOG_DEBUG("Cleanup velocities for perturb");
}
// Now, if I still have the high resolution density grid (HIRES_density_perturb) I need to downsample it to the low-resolution grid
if(user_params->PERTURB_ON_HIGH_RES) {
LOG_DEBUG("Downsample the high-res perturbed density");
// Transform to Fourier space to sample (filter) the box
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);
// Need to save a copy of the high-resolution unfiltered density field for the velocities
memcpy(HIRES_density_perturb_saved, HIRES_density_perturb, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
// Now filter the box
if (user_params->DIM != user_params->HII_DIM) {
filter_box(HIRES_density_perturb, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
}
// FFT back to real space
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);
// Renormalise the FFT'd box
#pragma omp parallel shared(HIRES_density_perturb,LOWRES_density_perturb,f_pixel_factor,mass_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) =
*((float *)HIRES_density_perturb + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5),
(unsigned long long)(j*f_pixel_factor+0.5),
(unsigned long long)(k*f_pixel_factor+0.5)))/(float)TOT_NUM_PIXELS;
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) -= 1.;
if (*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) < -1) {
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = -1.+FRACT_FLOAT_ERR;
}
}
}
}
}
}
else {
if (!global_params.EVOLVE_DENSITY_LINEARLY){
#pragma omp parallel shared(LOWRES_density_perturb,mass_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) /= mass_factor;
*( (float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k) ) -= 1.;
}
}
}
}
}
}
// transform to k-space
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);
//smooth the field
if (!global_params.EVOLVE_DENSITY_LINEARLY && global_params.SMOOTH_EVOLVED_DENSITY_FIELD){
filter_box(LOWRES_density_perturb, 1, 2, global_params.R_smooth_density*user_params->BOX_LEN/(float)user_params->HII_DIM);
}
// save a copy of the k-space density field
memcpy(LOWRES_density_perturb_saved, LOWRES_density_perturb, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);
// normalize after FFT
#pragma omp parallel shared(LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for(i=0; i<user_params->HII_DIM; i++){
for(j=0; j<user_params->HII_DIM; j++){
for(k=0; k<user_params->HII_DIM; k++){
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) /= (float)HII_TOT_NUM_PIXELS;
if (*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) < -1) // shouldn't happen
*((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k)) = -1+FRACT_FLOAT_ERR;
}
}
}
}
#pragma omp parallel shared(perturbed_field,LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)perturbed_field->density + HII_R_INDEX(i,j,k)) = *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k));
}
}
}
}
// **** Convert to velocities ***** //
LOG_DEBUG("Generate velocity fields");
float k_x, k_y, k_z, k_sq, dDdt_over_D;
int n_x, n_y, n_z;
dDdt_over_D = dDdt/growth_factor;
if(user_params->PERTURB_ON_HIGH_RES) {
// We are going to generate the velocity field on the high-resolution perturbed density grid
memcpy(HIRES_density_perturb, HIRES_density_perturb_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
}
else {
// We are going to generate the velocity field on the low-resolution perturbed density grid
memcpy(LOWRES_density_perturb, LOWRES_density_perturb_saved, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
}
#pragma omp parallel shared(LOWRES_density_perturb,HIRES_density_perturb,dDdt_over_D,dimension,switch_mid) \
private(n_x,n_y,n_z,k_x,k_y,k_z,k_sq) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (n_x=0; n_x<dimension; n_x++){
if (n_x>switch_mid)
k_x =(n_x-dimension) * DELTA_K; // wrap around for FFT convention
else
k_x = n_x * DELTA_K;
for (n_y=0; n_y<dimension; n_y++){
if (n_y>switch_mid)
k_y =(n_y-dimension) * DELTA_K;
else
k_y = n_y * DELTA_K;
for (n_z=0; n_z<=switch_mid; n_z++){
k_z = n_z * DELTA_K;
k_sq = k_x*k_x + k_y*k_y + k_z*k_z;
// now set the velocities
if ((n_x==0) && (n_y==0) && (n_z==0)) { // DC mode
if(user_params->PERTURB_ON_HIGH_RES) {
HIRES_density_perturb[0] = 0;
}
else {
LOWRES_density_perturb[0] = 0;
}
}
else{
if(user_params->PERTURB_ON_HIGH_RES) {
HIRES_density_perturb[C_INDEX(n_x,n_y,n_z)] *= dDdt_over_D*k_z*I/k_sq/(TOT_NUM_PIXELS+0.0);
}
else {
LOWRES_density_perturb[HII_C_INDEX(n_x,n_y,n_z)] *= dDdt_over_D*k_z*I/k_sq/(HII_TOT_NUM_PIXELS+0.0);
}
}
}
}
}
}
if(user_params->PERTURB_ON_HIGH_RES) {
// smooth the high resolution field ready for resampling
if (user_params->DIM != user_params->HII_DIM)
filter_box(HIRES_density_perturb, 0, 0, L_FACTOR*user_params->BOX_LEN/(user_params->HII_DIM+0.0));
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, HIRES_density_perturb);
#pragma omp parallel shared(perturbed_field,HIRES_density_perturb,f_pixel_factor) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)perturbed_field->velocity + HII_R_INDEX(i,j,k)) = *((float *)HIRES_density_perturb + R_FFT_INDEX((unsigned long long)(i*f_pixel_factor+0.5), (unsigned long long)(j*f_pixel_factor+0.5), (unsigned long long)(k*f_pixel_factor+0.5)));
}
}
}
}
}
else {
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, LOWRES_density_perturb);
#pragma omp parallel shared(perturbed_field,LOWRES_density_perturb) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)perturbed_field->velocity + HII_R_INDEX(i,j,k)) = *((float *)LOWRES_density_perturb + HII_R_FFT_INDEX(i,j,k));
}
}
}
}
}
fftwf_cleanup_threads();
fftwf_cleanup();
fftwf_forget_wisdom();
// deallocate
fftwf_free(LOWRES_density_perturb);
fftwf_free(LOWRES_density_perturb_saved);
if(user_params->PERTURB_ON_HIGH_RES) {
fftwf_free(HIRES_density_perturb);
fftwf_free(HIRES_density_perturb_saved);
}
fftwf_cleanup();
} // End of Try{}
Catch(status){
return(status);
}
return(0);
}
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  /// Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// Returns the starting location of the clause.
  SourceLocation getBeginLoc() const { return StartLoc; }

  /// Returns the ending location of the clause.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }

  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// An implicit clause has no spelled keyword, hence no valid start location.
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// Get the iterator range for the expressions used in the clauses. Used
  /// expressions include only the children that must be evaluated at the
  /// runtime before entering the construct.
  child_range used_children();
  const_child_range used_children() const {
    // Fix: delegate to the non-const used_children() overload. The previous
    // implementation called children() here, so the const overload returned
    // ALL children rather than only the used subset documented above,
    // diverging from the non-const overload's behavior.
    auto Children = const_cast<OMPClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;

protected:
  /// Guarded constructor: asserts that the concrete clause kind actually
  /// supports pre-initialization (i.e. get() recognizes it).
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  ///
  /// \param S Pre-initialization statement.
  /// \param ThisRegion Innermost region in which the statement is captured;
  /// defaults to OMPD_unknown (no specific capture region).
  void
  setPreInitStmt(Stmt *S,
                 OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  /// Map a generic clause to its pre-init interface (defined out of line).
  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  /// Guarded constructor: asserts that the concrete clause kind actually
  /// supports a post-update expression (i.e. get() recognizes it).
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  /// Map a generic clause to its post-update interface (defined out of line).
  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  OMPVarListLocTy() = default;

  /// Bundle the three locations shared by every variable-list clause.
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of variables in the list.
  unsigned NumVars;

protected:
  /// Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

  /// Fetches list of variables associated with this clause.
  ///
  /// CRTP: the variable expressions live in the derived class T's trailing
  /// object storage, hence the static_cast to T before accessing them.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
  }

  /// Sets the list of variables for this clause.
  ///
  /// \pre VL.size() == NumVars; the trailing storage is preallocated and
  /// cannot be resized here.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  /// Number of variables in the clause's list.
  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};
/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression with the allocator.
  Stmt *Allocator = nullptr;

  /// Set allocator.
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Build 'allocator' clause with the given allocator.
  ///
  /// \param A Allocator.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Allocator(A) {}

  /// Build an empty clause.
  OMPAllocatorClause()
      : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns allocator.
  Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }

  // The allocator expression is the clause's only child.
  child_range children() { return child_range(&Allocator, &Allocator + 1); }
  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }

  // No children need evaluation before entering the construct: empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocator;
  }
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;

  /// Position of the ':' delimiter in the clause;
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
                                            EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
                                            SourceLocation(), SourceLocation(),
                                            N) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Returns the allocator expression or nullptr, if no allocator is specified.
  Expr *getAllocator() const { return Allocator; }

  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);

  // The children are the variable references stored in the trailing-object
  // buffer; expose them as a Stmt** range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No children need evaluation before entering the construct: empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocate;
  }
};
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
        NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  // The condition expression is the clause's only child.
  child_range children() { return child_range(&Condition, &Condition + 1); }
  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  // Non-const overload is defined out of line; the const overload reuses it.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};
/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  // The condition expression is the clause's only child.
  child_range children() { return child_range(&Condition, &Condition + 1); }
  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  // Non-const overload is defined out of line; the const overload reuses it.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPFinalClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
};
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-threads expression of the 'num_threads' clause.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  // The number-of-threads expression is the clause's only child.
  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  // No children need evaluation before entering the construct: empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
};
/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  // The safelen expression is the clause's only child.
  child_range children() { return child_range(&Safelen, &Safelen + 1); }
  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  // No children need evaluation before entering the construct: empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};
/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to execute concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to execute concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  // The simdlen expression is the clause's only child.
  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  // No children need evaluation before entering the construct: empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};
/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;
  /// Set the number of associated for-loops (used by the clause reader).
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}
  /// Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  /// The only child is the loop-count expression.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }
  /// No used children: returns an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'default' clause.
  llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;
  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }
  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}
  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns kind of the clause.
  llvm::omp::DefaultKind getDefaultKind() const { return Kind; }
  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
  /// This clause stores no sub-expressions: returns an empty range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
};
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'proc_bind' clause.
  llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown;
  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; }
  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}
  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns kind of the clause.
  llvm::omp::ProcBindKind getProcBindKind() const { return Kind; }
  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
  /// This clause stores no sub-expressions: returns an empty range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }
};
/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPUnifiedAddressClause()
      : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_address;
  }
};
/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPUnifiedSharedMemoryClause()
      : OMPClause(OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_shared_memory;
  }
};
/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPReverseOffloadClause()
      : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reverse_offload;
  }
};
/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {
  }
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dynamic_allocators;
  }
};
/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;
  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }
  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }
public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }
  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }
  /// This clause stores no sub-expressions: returns an empty range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_atomic_default_mem_order;
  }
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;
  /// Modifiers for 'schedule' clause (at most two, e.g. 'monotonic, simd').
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];
  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;
  /// Location of ',' (if any).
  SourceLocation CommaLoc;
  /// Chunk size.
  Expr *ChunkSize = nullptr;
  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }
  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }
  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }
  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }
  /// Set a schedule modifier, filling the first unset slot; asserts if both
  /// slots are already occupied. (Name keeps its historical misspelling of
  /// 'Modifier'.)
  ///
  /// \param M Schedule modifier.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }
  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }
public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
        ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }
  /// Build an empty clause. Modifiers must be initialized explicitly since
  /// the arrays have no in-class initializers.
  explicit OMPScheduleClause()
      : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }
  /// Get kind of the clause.
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
  /// Get the first modifier of the clause.
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }
  /// Get the second modifier of the clause.
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }
  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// Get kind location.
  SourceLocation getScheduleKindLoc() { return KindLoc; }
  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }
  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }
  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }
  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }
  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }
  /// The only child is the chunk-size expression (may be null).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }
  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  /// No used children: returns an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_schedule;
  }
};
/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;
  /// Real number of loops.
  unsigned NumberOfLoops = 0;
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num), NumberOfLoops(NumLoops) {}
  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}
  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// Build 'ordered' clause. Constructors are private; clauses are created
  /// through this factory so the trailing Expr* storage is sized correctly.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
  /// Build an empty clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;
  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loops counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;
  /// The only child is the loop-count expression (may be null).
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }
  /// No used children: returns an empty range.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_ordered;
  }
};
/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }
};
/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }
};
/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }
};
/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }
};
/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
  /// This clause takes no argument and has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  /// Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }
};
/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
: public OMPClause,
private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
OpenMPDependClauseKind> {
friend class OMPClauseReader;
friend TrailingObjects;
/// true if extended version of the clause for 'depobj' directive.
bool IsExtended = false;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
// 2 locations: for '(' and argument location. The non-extended ('atomic')
// form stores no trailing data at all.
return IsExtended ? 2 : 0;
}
/// Sets the location of '(' in clause for 'depobj' directive.
void setLParenLoc(SourceLocation Loc) {
assert(IsExtended && "Expected extended clause.");
*getTrailingObjects<SourceLocation>() = Loc;
}
/// Sets the location of the argument in clause for 'depobj' directive.
void setArgumentLoc(SourceLocation Loc) {
assert(IsExtended && "Expected extended clause.");
*std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
}
/// Sets the dependence kind for the clause for 'depobj' directive.
void setDependencyKind(OpenMPDependClauseKind DK) {
assert(IsExtended && "Expected extended clause.");
*getTrailingObjects<OpenMPDependClauseKind>() = DK;
}
/// Build 'update' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
/// \param IsExtended true if this is the extended ('depobj') form that
/// carries trailing locations and a dependence kind.
OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
bool IsExtended)
: OMPClause(OMPC_update, StartLoc, EndLoc), IsExtended(IsExtended) {}
/// Build an empty clause.
OMPUpdateClause(bool IsExtended)
: OMPClause(OMPC_update, SourceLocation(), SourceLocation()),
IsExtended(IsExtended) {}
public:
/// Creates clause for 'atomic' directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Creates clause for 'depobj' directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ArgumentLoc Location of the argument.
/// \param DK Dependence kind.
/// \param EndLoc Ending location of the clause.
static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
OpenMPDependClauseKind DK,
SourceLocation EndLoc);
/// Creates an empty clause, used during AST deserialization.
///
/// \param C AST context.
/// \param IsExtended true if extended clause for 'depobj' directive must be
/// created.
static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);
/// Checks if the clause is the extended clauses for 'depobj' directive.
bool isExtended() const { return IsExtended; }
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// Gets the location of '(' in clause for 'depobj' directive.
SourceLocation getLParenLoc() const {
assert(IsExtended && "Expected extended clause.");
return *getTrailingObjects<SourceLocation>();
}
/// Gets the location of argument in clause for 'depobj' directive.
SourceLocation getArgumentLoc() const {
assert(IsExtended && "Expected extended clause.");
return *std::next(getTrailingObjects<SourceLocation>(), 1);
}
/// Gets the dependence kind in clause for 'depobj' directive.
OpenMPDependClauseKind getDependencyKind() const {
assert(IsExtended && "Expected extended clause.");
return *getTrailingObjects<OpenMPDependClauseKind>();
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_update;
}
};
/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
/// Build 'capture' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_capture, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPCaptureClause()
: OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_capture;
}
};
/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
/// Build 'seq_cst' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSeqCstClause()
: OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_seq_cst;
}
};
/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
/// Build 'acq_rel' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_acq_rel, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPAcqRelClause()
: OMPClause(OMPC_acq_rel, SourceLocation(), SourceLocation()) {}
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_acq_rel;
}
};
/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
/// Build 'acquire' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_acquire, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPAcquireClause()
: OMPClause(OMPC_acquire, SourceLocation(), SourceLocation()) {}
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_acquire;
}
};
/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
/// Build 'release' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_release, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReleaseClause()
: OMPClause(OMPC_release, SourceLocation(), SourceLocation()) {}
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_release;
}
};
/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
/// Build 'relaxed' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_relaxed, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPRelaxedClause()
: OMPClause(OMPC_relaxed, SourceLocation(), SourceLocation()) {}
// The clause has no associated expressions, so child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_relaxed;
}
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
: public OMPVarListClause<OMPPrivateClause>,
private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
// Tail-allocated storage holds two consecutive arrays of varlist_size()
// expressions each: the variable list itself, followed by the private
// copies (see getPrivateCopies(), which starts at varlist_end()).
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPPrivateClause(unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PrivateVL List of references to private copies with initializers.
static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PrivateVL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
// Children are the expressions of the variable list only.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPPrivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_private;
}
};
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
public OMPClauseWithPreInit,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
// Tail-allocated storage holds three consecutive arrays of varlist_size()
// expressions each: the variable list, the private copies, and the
// initializer variables (see getPrivateCopies() and getInits()).
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto generated variables used for
/// initialization of a single array element. Used if firstprivate variable is
/// of array type.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL, Stmt *PreInit);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
// Children are the expressions of the variable list only.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range used_children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_firstprivate;
}
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variables. Used in codegen for initialization of
// lastprivate copies.
// 2. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents private variables
// (for arrays, single array element).
// 3. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents original variables
// (for arrays, single array element).
// 4. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Optional lastprivate kind, e.g. 'conditional', if specified by user.
OpenMPLastprivateModifier LPKind;
/// Optional location of the lastprivate kind, if specified by user.
SourceLocation LPKindLoc;
/// Optional colon location, if specified by user.
SourceLocation ColonLoc;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param LPKind Lastprivate kind, e.g. 'conditional'.
/// \param LPKindLoc Location of the lastprivate kind.
/// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
/// \param N Number of the variables in the clause.
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
unsigned N)
: OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent original variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign private copy of the variable to original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
/// Sets lastprivate kind.
void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
/// Sets location of the lastprivate kind.
void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
/// Sets colon symbol location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// private variables (for arrays, single array element).
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// original variables (for arrays, single array element).
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
/// \param LPKind Lastprivate kind, e.g. 'conditional'.
/// \param LPKindLoc Location of the lastprivate kind.
/// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Lastprivate kind.
OpenMPLastprivateModifier getKind() const { return LPKind; }
/// Returns the location of the lastprivate kind.
SourceLocation getKindLoc() const { return LPKindLoc; }
/// Returns the location of the ':' symbol, if any.
SourceLocation getColonLoc() const { return ColonLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
// Children are the expressions of the variable list only.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLastprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_lastprivate;
}
};
/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
: public OMPVarListClause<OMPSharedClause>,
private llvm::TrailingObjects<OMPSharedClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPSharedClause(unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
// Children are the expressions of the variable list only.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPSharedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_shared;
}
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private copy of the reduction
/// variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions; stored directly after the
  /// private copies in trailing storage.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions; stored directly after
  /// the LHS expressions in trailing storage.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions; stored directly after the
  /// RHS expressions in trailing storage.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Range accessors over the helper-expression lists kept in trailing
  /// storage (one entry per clause variable in each list).
  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  /// Children of the clause are the variables in its variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// All variables of the clause are reported as used children.
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
///
/// Trailing storage layout (each list holds varlist_size() expressions):
/// { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
///   ReductionOps[]; }
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause (all locations invalid); used for
  /// deserialization, fields are filled in later via the set* members.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates (first list in trailing storage, right
  /// after the variable list).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions (stored after the privates).
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions (stored after the LHS
  /// expressions).
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions (stored after the RHS
  /// expressions).
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Range accessors over the helper-expression lists in trailing storage.
  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  /// Children of the clause are the variables in its variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// No child expressions are reported as used (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
///
/// Trailing storage layout (each list holds varlist_size() expressions):
/// { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
///   ReductionOps[]; TaskgroupDescriptors[]; }
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause (all locations invalid); used for
  /// deserialization, fields are filled in later via the set* members.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates (first list in trailing storage, right
  /// after the variable list).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions (stored after the privates).
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions (stored after the LHS
  /// expressions).
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions (stored after the RHS
  /// expressions).
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors (last list in
  /// trailing storage, after the reduction ops).
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Range accessors over the helper-expression lists in trailing storage.
  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  /// Children of the clause are the variables in its variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// No child expressions are reported as used (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;
/// Location of linear modifier if any.
SourceLocation ModifierLoc;
/// Location of ':'.
SourceLocation ColonLoc;
/// Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
/// Sets the expression to calculate linear step for clause.
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
/// Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
OMPClauseWithPostUpdate(this) {}
/// Gets the list of initial values for linear variables.
///
/// There are NumVars expressions with initial values allocated after the
/// varlist, they are followed by NumVars update expressions (used to update
/// the linear variable's value on current iteration) and they are followed by
/// NumVars final expressions (used to calculate the linear variable's
/// value after the loop body). After these lists, there are 2 helper
/// expressions - linear step and a helper to calculate it before the
/// loop body (used when the linear step is not constant):
///
/// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
/// Finals[]; Step; CalcStep; }
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Sets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
return llvm::makeArrayRef(getInits().end(), varlist_size());
}
/// Sets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// Gets the list of used expressions for linear variables.
MutableArrayRef<Expr *> getUsedExprs() {
return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
}
ArrayRef<const Expr *> getUsedExprs() const {
return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
}
/// Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);
/// Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);
public:
/// Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
/// Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }
/// Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
/// Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);
/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);
/// Sets the list of used expressions for the linear clause.
void setUsedExprs(ArrayRef<Expr *> UE);
// Iterator/range typedefs and accessors over the clause's helper expression
// lists (privates, inits, updates, finals), each exposed in mutable and
// const flavors.
using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;
/// Returns the range of private copies for the linear variables.
privates_range privates() {
return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
return privates_const_range(getPrivates().begin(), getPrivates().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
/// Returns the range of initialization expressions.
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;
/// Returns the range of update expressions.
updates_range updates() {
return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
return updates_const_range(getUpdates().begin(), getUpdates().end());
}
using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;
/// Returns the range of final update expressions.
finals_range finals() {
return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
return finals_const_range(getFinals().begin(), getFinals().end());
}
using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
using used_expressions_range =
    llvm::iterator_range<used_expressions_iterator>;
using used_expressions_const_range =
    llvm::iterator_range<used_expressions_const_iterator>;
/// Returns the range of expressions used in the linear clause.
/// Fixed to construct used_expressions_range/used_expressions_const_range
/// instead of the finals range types (the aliases name the same underlying
/// type, so behavior is unchanged, but the declared return type is now used
/// consistently and the accessor no longer breaks if the aliases diverge).
used_expressions_range used_expressions() {
  return used_expressions_range(getUsedExprs().begin(), getUsedExprs().end());
}
used_expressions_const_range used_expressions() const {
  return used_expressions_const_range(getUsedExprs().begin(),
                                      getUsedExprs().end());
}
/// All associated expressions: the variable list viewed as statements.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLinearClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Out-of-line: this clause contributes children that are actually used
/// by the directive (unlike most clauses, which return an empty range).
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPLinearClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
/// RTTI support: true if \p T is a 'linear' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_linear;
}
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
: public OMPVarListClause<OMPAlignedClause>,
private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Sets the alignment for clause.
/// The alignment expression lives in the single trailing slot immediately
/// past the variable list (written through *varlist_end()).
void setAlignment(Expr *A) { *varlist_end() = A; }
/// Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars) {}
public:
/// Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
Expr *A);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns alignment.
Expr *getAlignment() { return *varlist_end(); }
/// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }
/// The variable list viewed as statements. Note the range is
/// [varlist_begin, varlist_end), so the alignment expression stored at
/// *varlist_end() is not part of children().
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPAlignedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is an 'aligned' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_aligned;
}
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
: public OMPVarListClause<OMPCopyinClause>,
private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of master's thread values of
// threadprivate variables to local instances of that variables in other
// implicit threads.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
/// Tail array #1: starts right after the variable list (varlist_end()).
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
/// Tail array #2: starts right after the source expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
/// Tail array #3: starts right after the destination expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Returns the range of helper source expressions.
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
/// Returns the range of helper destination expressions.
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
/// Returns the range of helper assignment expressions.
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
/// The variable list viewed as statements (helper arrays excluded).
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyinClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'copyin' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyin;
}
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
/// Like OMPCopyinClause, this stores three additional tail-allocated arrays
/// of helper expressions (sources, destinations, assignment ops) after the
/// variable list.
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
/// Tail array #1: starts right after the variable list (varlist_end()).
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
/// Tail array #2: starts right after the source expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
/// Tail array #3: starts right after the destination expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Returns the range of helper source expressions.
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
/// Returns the range of helper destination expressions.
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
/// Returns the range of helper assignment expressions.
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
/// The variable list viewed as statements (helper arrays excluded).
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'copyprivate' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyprivate;
}
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
/// The variable list viewed as statements.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFlushClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'flush' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_flush;
}
};
/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Depobj expression associated with the clause.
/// (Previous comment "Chunk size." was a copy-paste error from a schedule
/// clause.)
Expr *Depobj = nullptr;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_depobj, StartLoc, EndLoc), LParenLoc(LParenLoc) {}
/// Build an empty clause.
///
explicit OMPDepobjClause()
: OMPClause(OMPC_depobj, SourceLocation(), SourceLocation()) {}
/// Sets the depobj expression of the clause.
void setDepobj(Expr *E) { Depobj = E; }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
public:
/// Creates clause.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Depobj depobj expression associated with the 'depobj' directive.
static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, Expr *Depobj);
/// Creates an empty clause.
///
/// \param C AST context.
static OMPDepobjClause *CreateEmpty(const ASTContext &C);
/// Returns depobj expression associated with the clause.
Expr *getDepobj() { return Depobj; }
const Expr *getDepobj() const { return Depobj; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Single child: the depobj expression.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&Depobj),
reinterpret_cast<Stmt **>(&Depobj) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPDepobjClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'depobj' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_depobj;
}
};
/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
/// Dependency type location.
SourceLocation DepLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
EndLoc, N), NumLoops(NumLoops) {}
/// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
NumLoops(NumLoops) {}
/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// Set dependency kind and its location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops);
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);
/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;
/// The variable list viewed as statements.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPDependClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'depend' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_depend;
}
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Device number.
Stmt *Device = nullptr;
/// Set the device number.
///
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
public:
/// Build 'device' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper statement registered as the pre-init statement.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPDeviceClause()
: OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }
/// Return device number.
/// NOTE(review): const overload returns a non-const Expr *, so a const
/// clause still hands out a mutable expression — confirm callers rely on
/// this before tightening it.
Expr *getDevice() const { return cast<Expr>(Device); }
/// Single child: the device-number expression.
child_range children() { return child_range(&Device, &Device + 1); }
const_child_range children() const {
return const_child_range(&Device, &Device + 1);
}
/// No children are used by the directive for this clause (empty range).
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'device' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_device;
}
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
/// A simple clause: it carries only source locations, no expressions.
class OMPThreadsClause : public OMPClause {
public:
/// Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_threads, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPThreadsClause()
: OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
/// No children: the clause has no associated expressions.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'threads' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_threads;
}
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
/// A simple clause: it carries only source locations, no expressions.
class OMPSIMDClause : public OMPClause {
public:
/// Build 'simd' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_simd, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
/// No children: the clause has no associated expressions.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// RTTI support: true if \p T is a 'simd' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_simd;
}
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
/// Class that represents a component of a mappable expression. E.g.
/// for an expression S.a, the first component is a declaration reference
/// expression associated with 'S' and the second is a member expression
/// associated with the field declaration 'a'. If the expression is an array
/// subscript it may not have any associated declaration. In that case the
/// associated declaration is set to nullptr.
class MappableComponent {
/// Expression associated with the component.
Expr *AssociatedExpression = nullptr;
/// Declaration associated with the declaration. If the component does
/// not have a declaration (e.g. array subscripts or section), this is set
/// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;
public:
explicit MappableComponent() = default;
/// The stored declaration is canonicalized on construction, so equality
/// comparisons between components see a single canonical decl.
explicit MappableComponent(Expr *AssociatedExpression,
ValueDecl *AssociatedDeclaration)
: AssociatedExpression(AssociatedExpression),
AssociatedDeclaration(
AssociatedDeclaration
? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
: nullptr) {}
/// Returns the expression of this component.
Expr *getAssociatedExpression() const { return AssociatedExpression; }
/// Returns the (canonical) declaration, or nullptr for decl-less
/// components such as array subscripts/sections.
ValueDecl *getAssociatedDeclaration() const {
return AssociatedDeclaration;
}
};
// List of components of an expression. This first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;
// List of all component lists associated to the same base declaration.
// E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
// their component list but the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;
protected:
// Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);
// Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// Bundles every size required to allocate an OMPMappableExprListClause
/// and its trailing storage.
struct OMPMappableExprListSizeTy {
  /// How many expressions are listed in the clause.
  unsigned NumVars;
  /// How many unique base declarations the clause references.
  unsigned NumUniqueDeclarations;
  /// How many component lists the clause holds.
  unsigned NumComponentLists;
  /// Total component count summed over all lists.
  unsigned NumComponents;

  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents) {
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }

  /// Get the user-defined mapper references that are in the trailing objects of
  /// the class. The mapper references are stored directly after the
  /// varlist_size() listed expressions.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Get the user-defined mappers references that are in the trailing objects
  /// of the class.
  ArrayRef<Expr *> getUDMapperRefs() const {
    // Note: use static_cast<const T *> here; casting a const 'this' to a
    // non-const T* would (invalidly) cast away constness. This matches the
    // other const trailing-object accessors above.
    return llvm::makeArrayRef<Expr *>(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Set the user-defined mappers that are in the trailing objects of the
  /// class.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  // The reader deserializes directly into the private trailing storage.
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One list count per declaration, then one cumulative size per list.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Number of allowed map-type-modifiers.
  static constexpr unsigned NumberOfModifiers =
      OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    // The modifier (and location) arrays are fixed-size; callers must supply
    // exactly NumberOfModifiers entries.
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// All listed expressions are children of the clause.
  child_range children() {
    return child_range(
        reinterpret_cast<Stmt **>(varlist_begin()),
        reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// The listed expressions count as "used" children only for map types
  /// 'to' and 'tofrom'; other map types yield an empty range.
  child_range used_children() {
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// RTTI support: true iff \a T is a 'map' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};
/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// The expression that yields the number of teams. Stored as a Stmt so
  /// the children() range can hand out Stmt pointers directly.
  Stmt *NumTeams = nullptr;

  /// Record the num_teams expression.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  /// The single child is the num_teams expression itself.
  child_range children() {
    Stmt **Begin = &NumTeams;
    return child_range(Begin, Begin + 1);
  }

  const_child_range children() const {
    Stmt *const *Begin = &NumTeams;
    return const_child_range(Begin, Begin + 1);
  }

  /// No children are considered "used" by this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// RTTI support: true iff \a T is a 'num_teams' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
};
/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;
  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }
  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return ThreadLimit number.
  /// Note: uses cast<>, so ThreadLimit must be non-null when this is called.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
  child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }
  // No clause-local children are "used" for code generation purposes.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};
/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Priority number.
  Stmt *Priority = nullptr;
  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }
public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }
  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return Priority number.
  /// Note: uses cast<>, so Priority must be non-null when this is called.
  Expr *getPriority() { return cast<Expr>(Priority); }
  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }
  child_range children() { return child_range(&Priority, &Priority + 1); }
  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }
  // Defined out of line (unlike most clauses, 'priority' reports its
  // expression as a used child).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};
/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;
  /// Set the grainsize expression.
  ///
  /// \param Size Grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }
  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the grainsize expression (may be null for an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }
  // Defined out of line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};
/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}
  // 'nogroup' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// NumTasks expression of the clause.
  Stmt *NumTasks = nullptr;
  /// Set the num_tasks expression.
  ///
  /// \param Size NumTasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper num_tasks expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }
  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return the num_tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }
  // Defined out of line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;
  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }
public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}
  /// Build an empty clause.
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns the hint expression (may be null for an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }
  child_range children() { return child_range(&Hint, &Hint + 1); }
  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }
  // No clause-local children are "used" for code generation purposes.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;
  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;
  /// Location of ',' (if any).
  SourceLocation CommaLoc;
  /// Chunk size.
  Expr *ChunkSize = nullptr;
  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }
  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }
public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }
  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }
  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }
  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }
  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }
  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }
  // ChunkSize is stored as Expr*, but child_range iterates over Stmt**,
  // hence the reinterpret_cast of the member's address.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }
  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  // No clause-local children are "used" for code generation purposes.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Modifier for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;
  /// Location of the modifier.
  SourceLocation ModifierLoc;
  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;
  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;
  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }
  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }
  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param MLoc Location of the modifier.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}
  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}
  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }
  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }
  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }
  // 'defaultmap' carries no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}
  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}
  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }
public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);
  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);
  // Children are the expressions in the clause's variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  // No clause-local children are "used" for code generation purposes.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}
  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}
  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }
public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists,
                               ArrayRef<Expr *> UDMapperRefs,
                               NestedNameSpecifierLoc UDMQualifierLoc,
                               DeclarationNameInfo MapperId);
  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);
  // Children are the expressions in the clause's variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  // No clause-local children are "used" for code generation purposes.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
: public OMPMappableExprListClause<OMPUseDevicePtrClause>,
private llvm::TrailingObjects<
OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  ///
  /// Three Expr* arrays are stored back-to-back: the original variable list,
  /// the private copies, and the initializers — hence 3 * varlist_size().
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.  The copies are laid out immediately after the
  /// original variable list in the trailing storage.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.  Stored directly after the private copies.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // Iterator/range typedefs over the private-copy expression list.
  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  // Iterator/range typedefs over the initializer expression list.
  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  // Children are the original variable-list expressions only.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No separately visited "used" children for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  ///
  /// Unlike 'use_device_ptr', no private copies or initializers are stored,
  /// so only the variable list itself occupies the Expr* array.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPIsDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // Children are the variable-list expressions.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No separately visited "used" children for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_is_device_ptr;
  }
};
/// This represents clause 'nontemporal' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp simd nontemporal(a)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'nontemporal' for
/// the variable 'a'.
class OMPNontemporalClause final
    : public OMPVarListClause<OMPNontemporalClause>,
      private llvm::TrailingObjects<OMPNontemporalClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(OMPC_nontemporal, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses.  Stored immediately after the
  /// variable list in the trailing storage.
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  // Children are the variable-list expressions.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Ranges over the private-copy references set via setPrivateRefs().
  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }
  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  // No separately visited "used" children for this clause.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nontemporal;
  }
};
/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
                 SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A),
        KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPOrderClause()
      : OMPClause(OMPC_order, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPOrderClauseKind getKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getKindKwLoc() const { return KindKwLoc; }

  // The clause holds no sub-expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_order;
  }
};
/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has 'destroy' clause.
class OMPDestroyClause final : public OMPClause {
public:
  /// Build 'destroy' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_destroy, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDestroyClause()
      : OMPClause(OMPC_destroy, SourceLocation(), SourceLocation()) {}

  // The clause carries no expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_destroy;
  }
};
/// This class implements a simple visitor for OMPClause
/// subclasses.
///
/// The Ptr template template parameter selects pointer or pointer-to-const
/// visitation (see OMPClauseVisitor / ConstOMPClauseVisitor below).
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // One Visit##Class method per clause, generated from OpenMPKinds.def;
  // each defaults to dispatching into the derived visitor (CRTP).
#define OPENMP_CLAUSE(Name, Class) \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
    default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class) \
    case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
    }
  }
  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};

/// Alias producing a pointer-to-const for the const visitor variant.
template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

/// Visitor over mutable OMPClause pointers.
template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};

/// Visitor over const OMPClause pointers.
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
      public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
/// Visitor that pretty-prints each OMPClause kind to a stream.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

  // Declare one Visit##Class printer per clause kind.
#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
struct OMPTraitInfo {
  /// A single trait property (leaf of the trait hierarchy).
  struct OMPTraitProperty {
    llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;
  };
  /// A selector: kind, optional score/condition expression, and properties.
  struct OMPTraitSelector {
    Expr *ScoreOrCondition = nullptr;
    llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
    llvm::SmallVector<OMPTraitProperty, 4> Properties;
  };
  /// A selector set: kind plus its ordered selectors.
  struct OMPTraitSet {
    llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
    llvm::SmallVector<OMPTraitSelector, 4> Selectors;
  };

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 4> Sets;

  /// Invoke \p Cond on every score/condition expression; returns true as soon
  /// as \p Cond does.  The bool passed to \p Cond is true for scores (i.e. any
  /// selector other than user_condition).
  bool anyScoreOrCondition(
      const llvm::function_ref<bool(Expr *&, bool /* IsScore */)> &Cond) {
    return llvm::any_of(Sets, [&Cond](OMPTraitInfo::OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&Cond](OMPTraitInfo::OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
django_fmt_plug.c | /* Django 1.4 patch for JtR. Hacked together during May of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$django$*type*django-hash
*
* Where,
*
* type => 1, for Django 1.4 pbkdf_sha256 hashes and
*
* django-hash => Second column of "SELECT username, password FROM auth_user"
*
* July, 2012, the oSSL PKCS5_PBKDF2_HMAC function was replaced with a much faster
* function pbkdf2() designed by JimF. Originally this function was designed for
* the mscash2 (DCC2). The same pbkdf2 function, is used, and simply required small
* changes to use SHA256.
*
* This new code is 3x to 4x FASTER than the original oSSL code. Even though it is
 * only using oSSL functions. A lot of the high level stuff in oSSL sux for speed.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_django;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django);
#else
// uncomment this header to use the slower PKCS5_PBKDF2_HMAC function.
// Note, PKCS5_PBKDF2_HMAC is ONLY available in oSSL 1.00 + (1.0c I think to be exact)
//#include <openssl/evp.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "base64.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // tuned on core i7
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Django"
#define FORMAT_NAME ""
#define FORMAT_TAG "$django$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (x10000)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define HASH_LENGTH 44
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Self-test vectors: each entry pairs a full "$django$*1*..." ciphertext
 * with its known plaintext.  The last two exercise long passwords
 * (64 and 125 characters, the declared PLAINTEXT_LENGTH maximum). */
static struct fmt_tests django_tests[] = {
	{"$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$x/geVEkdZSlJMqvIYJ7G6i5l/6KJ0UpvLUU6cfj83VM=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd7$2nTDwPhSsDKOwpKiV04teVtf+a14Rs7na/lIB3KnHkM=", "123"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd1$bkdQo9RoatRomupPFP+XEo+Guuirq4mi+R1cFcV0U3M=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd6$Uq33DAHOFHUED+32IIqCqm+ITU1mhsGOJ7YwFf6h+6k=", "password"},
	{"$django$*1*pbkdf2_sha256$10000$34L3roCQ6ZfN$R21tJK1sIDfmj9BfBocefFfuGVwE3pXcLEhChNjc+pU=", "0123456789012345678901234567890123456789012345678901234567890123"},
	{"$django$*1*pbkdf2_sha256$10000$7qPqyUDw8kZV$pFmVRjlHvayoWEy8ZWXkHgfmgImUKLmkmruclpYVAxM=", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{NULL}
};

/* Per-candidate plaintext buffers (NUL-terminated), sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate PBKDF2-SHA256 output, BINARY_SIZE (32) bytes each. */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Parsed salt record; valid() only accepts type == 1 (pbkdf2_sha256). */
static struct custom_salt {
	int type;
	int iterations;
	unsigned char salt[32];	/* NUL-terminated ASCII salt, at most 31 chars */
} *cur_salt;
/*
 * One-time format setup: scale the keys-per-crypt counts for OpenMP and
 * allocate the per-candidate key/result buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* Standard JtR OMP tuning: min scales by thread count, max also by
	 * OMP_SCALE so each thread gets several keys per crypt_all() call. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
		self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Release the buffers allocated in init(), in reverse-of-allocation order
 * is not required here; the two frees are independent. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
 * Validate a candidate ciphertext of the form
 *   $django$*type*pbkdf2_sha256$iterations$salt$base64hash
 * Returns 1 if the line is well-formed (type 1, decimal iteration count,
 * salt that fits cur_salt->salt, MIME-base64 hash ending in '='), else 0.
 * Works on a strdup'ed copy because strtokm() modifies its input.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	size_t hash_len;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* type */
		goto err;
	/* type must be 1 */
	if (!isdec(p))
		goto err;
	if (atoi(p) != 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* algorithm */
		goto err;
	if (strcmp(p, "pbkdf2_sha256") != 0)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p)) // FIXME: what about iterations == 0?
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	/* must leave room for the NUL stored by get_salt()'s strcpy() */
	if (strlen(p) > sizeof(cur_salt->salt)-1)
		goto err;
	if ((p = strtokm(NULL, "")) == NULL)	/* hash */
		goto err;
	/* hoist strlen(p); everything up to the trailing '=' must be valid
	 * MIME base64 and the whole hash must fit HASH_LENGTH */
	hash_len = strlen(p);
	if (hash_len-1 != base64_valid_length(p,e_b64_mime,flg_Base64_MIME_TRAIL_EQ, 0) || hash_len-1 > HASH_LENGTH-1)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse the salt fields out of a ciphertext that already passed valid().
 * Returns a pointer to a static struct custom_salt (JtR copies it out).
 * NOTE(review): the strtokm() results are not NULL-checked; this relies on
 * valid() having vetted the string first.  The strcpy() into cs.salt is
 * bounded because valid() rejects salts longer than 31 characters.
 */
static void *get_salt(char *ciphertext)
{
	char Buf[120], *ctcopy=Buf;
	char *p, *t;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	/* work on a truncated local copy; strtokm() writes NULs into it */
	strncpy(Buf, ciphertext, 119);
	Buf[119] = 0;
	ctcopy += FORMAT_TAG_LEN; /* skip over "$django$*" */
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	strtokm(NULL, "$");	/* skip the "pbkdf2_sha256" algorithm name */
	t = strtokm(NULL, "$");
	cs.iterations = atoi(t);
	t = strtokm(NULL, "$");
	strcpy((char*)cs.salt, t);
	return (void *)&cs;
}
/*
 * Decode the trailing base64 hash of a ciphertext into its raw 32-byte
 * form.  Returns a pointer into a static, word-aligned buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1];
		ARCH_WORD dummy;	/* forces word alignment */
	} buf;
	char *hash_start;

	hash_start = strrchr(ciphertext, '$') + 1;
	base64_decode(hash_start, strlen(hash_start), (char *)buf.c);
	return buf.c;
}
/* Hash-table lookup helpers: each returns the first crypt_out word masked
 * to the corresponding table size (PH_MASK_0 .. PH_MASK_6). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Install the salt to be used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute PBKDF2-SHA256 for all queued candidate keys under the current
 * salt, writing 32-byte digests into crypt_out[].  With OpenMP the index
 * loop is parallelized in MAX_KEYS_PER_CRYPT-sized strides; without it the
 * braced body runs once for index 0 (count is then at most
 * MAX_KEYS_PER_CRYPT).  The SIMD path hashes SSE_GROUP_SZ_SHA256 keys at
 * a time via pbkdf2_sha256_sse().
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		/* gather the key pointers/lengths for this SIMD group */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = crypt_out[i+index];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 32, 0);
#else
		// Scalar path; the commented call below is the (slower) OpenSSL
		// equivalent kept for reference:
		// PKCS5_PBKDF2_HMAC(saved_key[index], strlen(saved_key[index]),
		//	cur_salt->salt, strlen((char*)cur_salt->salt),
		//	cur_salt->iterations, EVP_sha256(), 32, (unsigned char*)crypt_out[index]);
		pbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]),
			cur_salt->salt, strlen((char*)cur_salt->salt),
			cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);
#endif
	}
	return count;
}
/*
 * Quick scan: does any computed digest match the target binary?
 * Only the first ARCH_SIZE bytes (one machine word) are compared here as a
 * cheap filter; cmp_one() below does the full BINARY_SIZE comparison.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full-width comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Nothing beyond cmp_one() is needed for this format. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Store a candidate plaintext in slot 'index'.  Use a bounded,
 * always-NUL-terminating copy (strnzcpy from misc.h) instead of a raw
 * strcpy so an over-long key cannot overflow the PLAINTEXT_LENGTH+1
 * buffer — the JtR core should never pass one, but this is cheap
 * insurance.
 */
static void django_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
}

/* Return the plaintext previously stored in slot 'index'. */
static char *get_key(int index)
{
	return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int)my_salt->iterations;
}
/*
 * Format registration record: wires the parameters, test vectors and the
 * method implementations above into JtR's plugin interface.
 */
struct fmt_main fmt_django = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* tunable cost name reported by iteration_count() */
			"iteration count",
		},
		{ FORMAT_TAG },
		django_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		django_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
area2.c | #include <stdio.h>
#include <math.h>
#define LEFT 0.0F
#define RIGHT 3.1416F
#define SLICES 100000000
#define WIDTH ((RIGHT-LEFT)/SLICES)
/*
 * Numerically integrate sin(x) over [LEFT, RIGHT] as a Riemann sum of
 * SLICES rectangles, parallelized with an OpenMP sum reduction.  RIGHT is
 * a truncated pi, so the printed area is approximately 2.0.
 */
int
main(void)
{
	int i;
	double area = 0.0;	/* double literal to match the accumulator type */

#pragma omp parallel for reduction(+: area)
	for (i = 0; i < SLICES; ++i) {
		area += sin(LEFT + i * WIDTH) * WIDTH;
	}
	printf("area = %f\n", area);
	return 0;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define M (16*32)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];
/*
 * Offloading regression driver: exercises clauses on
 * "teams distribute parallel for" with nested "parallel for" regions.
 * Each section re-#defines NESTED_PARALLEL_FOR_CLAUSES and re-includes
 * defines.h, which (presumably) regenerates the NESTED_PARALLEL_FOR
 * harness macro with those clauses -- the macro itself is not visible here.
 */
int main(void) {
  check_offloading();

  INIT();

  // Record whether target regions actually run on the host; the coalesced
  // scheduling check at the bottom is GPU-only.
  int cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int tms = 16;
  int th = 32;
  int threads[1]; threads[0] = th-1;

  //
  // Test: proc_bind clause
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i]; \
      B[idx][i] += D[i] + E[i]; \
    },
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i]; \
      B[idx][i] += D[i] + E[i]; \
    },
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i]; \
      B[idx][i] += D[i] + E[i]; \
    },
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: private, shared clauses on omp teams distribute parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "defines.h"
  // NOTE(review): only 'p' is declared at file scope; 'q' in the clause
  // presumably resolves to something provided by defines.h -- verify there.
  NESTED_PARALLEL_FOR(
    double p = 2; \
    double q = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      p = C[i] + D[i]; \
      q = D[i] + E[i]; \
      A[idx][i] += p; \
      B[idx][i] += q; \
    }
    ,
    {
      double tmp = p + q;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))

  //
  // Test: firstprivate clause on omp teams distribute parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
  NESTED_PARALLEL_FOR(
    double p = -4; \
    double q = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    },
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i] + p; \
      B[idx][i] += D[i] + E[i] + q; \
      if (i == N-1) { \
        p += 6; \
        q += 9; \
      } \
    }
    ,
    {
      double tmp = p + q;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: lastprivate clause on omp teams distribute parallel for with nested parallel.
  //
  TESTD("omp target", {
    _Pragma("teams distribute parallel for num_teams(tms) num_threads(th)")
    for (int idx = 0; idx < tms*th; idx++) {
      double q0[1];
      double q1[1];
      double q2[1];
      double q3[1];
      S[idx] = 0;
      for (int i = 0; i < N; i++) {
        A[idx][i] = B[idx][i] = 0;
      }
      // One nested loop per schedule kind; each lastprivate qN must hold
      // the value from the sequentially-last iteration afterwards.
      _Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q0[0] = C[i] + D[i];
        A[idx][i] += q0[0];
      }
      _Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q1[0] = C[i] + D[i];
        A[idx][i] += q1[0];
      }
      _Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q2[0] = D[i] + E[i];
        B[idx][i] += q2[0];
      }
      _Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
      for (int i = 0; i < N; i++) {
        q3[0] = D[i] + E[i];
        B[idx][i] += q3[0];
      }
      double tmp = q0[0] + q1[0] + q2[0] + q3[0];
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    }
  }, VERIFY(0, tms*th, S[i], (double) 2 * (N + (N/2*(N+1))) ));

  //
  // Test: private clause on omp teams distribute parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p)
#include "defines.h"
  NESTED_PARALLEL_FOR(
    double p[2]; \
    p[0] = 2; p[1] = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      p[0] = C[i] + D[i]; \
      p[1] = D[i] + E[i]; \
      A[idx][i] += p[0]; \
      B[idx][i] += p[1]; \
    }
    ,
    {
      double tmp = p[0] + p[1];
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))

  //
  // Test: firstprivate clause on omp teams distribute parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
#include "defines.h"
  NESTED_PARALLEL_FOR(
    double p[2]; \
    p[0] = -4; p[1] = 4; \
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    }
    ,
    for (int i = 0; i < N; i++) { \
      A[idx][i] += C[i] + D[i] + p[0]; \
      B[idx][i] += D[i] + E[i] + p[1]; \
      if (i == N-1) { \
        p[0] += 6; \
        p[1] += 9; \
      } \
    }
    ,
    {
      double tmp = p[0] + p[1];
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: collapse clause on omp teams distribute parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
#include "defines.h"
  // 1024*3 == N, so the collapsed i/j loops cover the same index space.
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    for (int i = 0; i < N; i++) { \
      A[idx][i] = B[idx][i] = 0; \
    }
    ,
    for (int i = 0; i < 1024; i++) { \
      for (int j = 0; j < 3; j++) { \
        A[idx][i*3+j] += C[i*3+j] + D[i*3+j]; \
        B[idx][i*3+j] += D[i*3+j] + E[i*3+j]; \
      } \
    }
    ,
    {
      double tmp = 0;
      for (int i = 0; i < N; i++) {
        tmp += A[idx][i] + B[idx][i];
      }
      S[idx] += tmp;
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: ordered clause on omp teams distribute parallel for with nested parallel.
  //
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES ordered
#include "defines.h"
  NESTED_PARALLEL_FOR(
    S[idx] = 0; \
    ,
    for (int i = 0; i < N; i++) { \
      _Pragma("omp ordered") \
      S[idx] += C[i] + D[i]; \
    }
    ,
    {
    },
    VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))

  //
  // Test: Ensure coalesced scheduling on GPU.
  //
  // Each nested loop subtracts the executing thread id; with coalesced
  // (thread id == i % 32) scheduling the residuals sum to a fixed value.
  if (!cpuExec) {
    TESTD("omp target", {
      _Pragma("teams distribute parallel for num_teams(tms) num_threads(th)")
      for (int idx = 0; idx < tms*th; idx++) {
        S[idx] = 0;
        for (int i = 0; i < 96; i++) {
          A[idx][i] = 0;
        }
        _Pragma("omp parallel for num_threads(32)")
        for (int i = 0; i < 96; i++) {
          A[idx][i] += i - omp_get_thread_num();
        }
        _Pragma("omp parallel for schedule(auto) num_threads(32)")
        for (int i = 0; i < 96; i++) {
          A[idx][i] += i - omp_get_thread_num();
        }
        _Pragma("omp parallel for schedule(static,1) num_threads(32)")
        for (int i = 0; i < 96; i++) {
          A[idx][i] += i - omp_get_thread_num();
        }
        double tmp = 0;
        for (int i = 0; i < 96; i++) {
          tmp += A[idx][i];
        }
        S[idx] = tmp;
      }
    }, VERIFY(0, tms*th, S[i], (double) 3 * (32*32 + 64*32) ));
  } else {
    DUMP_SUCCESS(1);
  }

  return 0;
}
|
GB_binop__rminus_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_fp64
// A.*B function (eWiseMult): GB_AemultB__rminus_fp64
// A*D function (colscale): GB_AxD__rminus_fp64
// D*A function (rowscale): GB_DxB__rminus_fp64
// C+=B function (dense accum): GB_Cdense_accumB__rminus_fp64
// C+=b function (dense accum): GB_Cdense_accumb__rminus_fp64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_fp64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_fp64
// C=scalar+B GB_bind1st__rminus_fp64
// C=scalar+B' GB_bind1st_tran__rminus_fp64
// C=A+scalar GB_bind2nd__rminus_fp64
// C=A'+scalar GB_bind2nd_tran__rminus_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (y - x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP64 || GxB_NO_RMINUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense.  The loop itself lives in the
// shared template below, specialized via the GB_* macros defined above
// (GB_BINOP: cij = bij - aij for rminus_fp64).
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard here — presumably handled by the template/caller; confirm.
void GB_Cdense_ewise3_accum__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense (no accumulation).
// Returns GrB_NO_VALUE when this operator/type pair is compiled out
// (see GB_DISABLE above), signalling the caller to use the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// kfirst_slice/klast_slice/pstart_slice partition B's entries into ntasks
// parallel tasks (produced by the ek_slice machinery; see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// p_bwork points to the scalar, passed untyped and cast to double here.
GrB_Info GB_Cdense_accumb__rminus_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// not reached: the generator emits a second return after the block above
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// The *_is_pattern flags mean only the structure (not the values) of that
// input is used; the slice arrays partition A for ntasks parallel tasks.
GrB_Info GB_AxD__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values; the colscale template fills Cx via GB_BINOP
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values; the rowscale template fills Cx via GB_BINOP
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B.
// M is the optional mask (structural if Mask_struct); C_to_M/C_to_A/C_to_B
// map C's vectors to those of M, A and B; TaskList holds the parallel tasks.
GrB_Info GB_AaddB__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B.  Same task/mapping conventions as GB_AaddB above.
GrB_Info GB_AemultB__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply rminus with the scalar bound as the first input,
// so cij = rminus (x, bij) = bij - x for all anz entries.
GrB_Info GB_bind1st__rminus_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
double *Cvals = (double *) Cx_output ;
const double xval = (*((const double *) x_input)) ;
const double *Bvals = (const double *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cij = bij - x
Cvals [k] = Bvals [k] - xval ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply rminus with the scalar bound as the second input,
// so cij = rminus (aij, y) = y - aij for all anz entries.
GrB_Info GB_bind2nd__rminus_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
double *Cvals = (double *) Cx_output ;
const double *Avals = (const double *) Ax_input ;
const double yval = (*((const double *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cij = y - aij
Cvals [k] = yval - Avals [k] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply rminus with scalar x bound first;
// each entry becomes cij = aij - x (see GB_CAST_OP defined just above).
GrB_Info GB_bind1st_tran__rminus_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (generator boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply rminus with scalar y bound second;
// each entry becomes cij = y - aij (see GB_CAST_OP defined just above).
GrB_Info GB_bind2nd_tran__rminus_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__sqrt_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sqrt_fp32_fp32)
// op(A') function: GB (_unop_tran__sqrt_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sqrtf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sqrtf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sqrtf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply sqrtf entrywise to anz float values.
// When Ab is non-NULL, A is bitmap and only entries with Ab[p] != 0 exist.
GrB_Info GB (_unop_apply__sqrt_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
int64_t k ;
if (Ab != NULL)
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (Ab [k])
{
float avalue = Ax [k] ;
Cx [k] = sqrtf (avalue) ;
}
}
}
else
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
// full case: every position holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
float avalue = Ax [k] ;
Cx [k] = sqrtf (avalue) ;
}
#endif
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply sqrtf.  The actual
// transpose loop lives in GB_unop_transpose.c, driven by GB_CAST_OP above.
GrB_Info GB (_unop_tran__sqrt_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bfm_evo.h | /* -*- mode:c++; c-basic-offset:2 -*- */
/****************************************************************************/
/* Aug 2012 */
/* Hantao Yin */
/* */
/* bfm_evo.h */
/* functions added by Hantao (mainly DWF-like fermion evolution related). */
/* */
/****************************************************************************/
#ifndef INCLUDED_BFM_EVO_HT_H
#define INCLUDED_BFM_EVO_HT_H
#include <stdio.h>
#include <bagel_int.h>
#include <bfm.h>
#include <bfm_qdp.h>
#include <omp.h>
#include <math.h>
#include <vector>
#include <util/gjp.h>
#include "bfm_evo_aux.h"
// Direction flags for the impex (import/export) routines below: the
// `doimport` arguments compare against these (0 = export from BFM to the
// external array, 1 = import into BFM).
enum
{ Export = 0, Import = 1 };
// FIXME: it inherits from bfm_qdp for the sole reason of using its
// importGauge() function. I'm too lazy to do any manual
// shifts/conjugates ......
// bfm_evo: DWF-like fermion-evolution extensions on top of bfm_qdp —
// HMC force-term computation, import/export between CPS and BFM field
// layouts (including G-parity variants), eigensolver hooks, and several
// experimental Krylov solvers.
template <class Float>
class bfm_evo : public bfm_qdp<Float> {
public:
// BFM has this
// enum {Even = 0, Odd};
// --- index helpers: map (x[4], s, reim, internal index i) to offsets in
// --- CPS-layout arrays; _cb variants address a single checkerboard.
integer cps_idx_cb(int x[4], int s, int reim, int i, int i_size);
integer cps_idx_cb_gparity(int x[4], int s, int reim, int i, int i_size, int flav);
// s outer most
integer cps_idx(int x[4], int s, int reim, int i, int i_size);
integer cps_idx_gparity(int x[4], int s, int reim, int i, int i_size, int flav);
// s inner most (but outside color and spin)
integer cps_idx_s(int x[4], int s, int reim, int i, int i_size);
// index for 4d fermion field
integer cps_idx_4d (int x[4], int reim, int i, int i_size);
integer cps_idx_s_gparity(int x[4], int s, int reim, int i, int i_size, int flav);
// compute the vector pair (v1, v2) needed to calculate fermion force.
void calcMDForceVecs(Fermion_t v1[2], Fermion_t v2[2],
Fermion_t phi1, Fermion_t phi2);
void Booee(Fermion_t psi, Fermion_t chi, int dag);
// Ritz method used to compute the maximum/minimum eigenvalue of M^\dag M.
// Use algorithm presented in arXiv: hep-lat/9507023.
//
// If compute_min == true then we compute the minmum eigenvalue of
// M^\dag M, otherwise we compute the maximum eigenvalue, i.e. the
// negative of the minimum eigenvalue of -M^\dag M.
double ritz(Fermion_t x, int compute_min);
// solve a propagator, for HtCayleyTanh this is just unpreconditioned CG
// for HmCayleyTanh this is D^{-1} Dminus acting on in[2].
// int prop_solve(Fermion_t out[2], Fermion_t in[2]);
// ======================================================================
// these functions need to be rewritten to fit into bfm style.
// currently they contain both bfm and CPS style gauge/fermion fields.
#if 0 //testing
private:
#endif
// auxiliary functions used by compute_force().
//CK: gpf1_offset_p is the offset to reach the second G-parity flavour in the vectors v1p and v2p.
// Its value depends on whether v1p/v2p are internal vectors (24*5dvol) or in the buffer send
// from the next node (24*Ls*3dsurfvol where 3dsurfvol is the 3d surface volume in the comms direction)
void fforce_site(Float *mom, Float *gauge,
Float *v1, Float *v1p,
Float *v2, Float *v2p, int mu, Float coef, int gpf1_offset_p = 0);
void fforce_internal(Float *mom, Float *gauge,
Float *v1, Float *v2, // internal data
Float coef, int mu,
int me, int nthreads);
void fforce_surface(Float *mom, Float *gauge,
Float *v1, Float *v2, // internal data
Float *v1_s, Float *v2_s, // surface data
Float coef, int mu);
void copySendFrmData(Float v3d[], Float v4d[], int mu, bool send_neg);
// complex version of axpy()
// NOTE: currently aborts at runtime — the underlying zaxpy call is disabled.
void axpy_c(Fermion_t r, Fermion_t x, Fermion_t y, std::complex<double> a, Fermion_t tmp) {
printf("void axpy_c temporarily disabled\n");
exit(-1);
#if 0
this->zaxpy(r, x, y, a);
#endif
}
public:
// Split nwork items over nthreads with no barrier: thread `me` receives
// `mywork` items starting at `myoff`; the first `backfill` threads get
// the smaller share when nwork does not divide evenly.
void thread_work_partial_nobarrier(int nwork, int me, int nthreads,
int &mywork, int &myoff)
{
int basework = nwork / nthreads;
int backfill = nthreads - (nwork % nthreads);
mywork = (nwork + me) / nthreads;
myoff = basework * me;
if ( me > backfill )
myoff += (me-backfill);
}
// compute fermion force:
//
// mom += coef * (phiL^\dag e_i(M) \phiR + \phiR^\dag e_i(M^\dag) \phiL)
//
// For BFM M is M = M_oo - M_oe M^{-1}_ee M_eo
void compute_force(Float *mom, Float *gauge, Fermion_t phiL, Fermion_t phiR, double coef);
#if 0
//CHECK CODE
template<typename FloatEXT>
void thread_impexFermion_s_test(FloatEXT *psi, Fermion_t handle[2], int doimport);
#endif
// psi assumes the following order: (color, spin, s, x, y, z, t),
// mainly used to import/export the "v1" and "v2" vectors in evolution.
template<typename FloatEXT>
void thread_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport);
Float *threadedAllocFloat(size_t size, int mem_type=mem_slow);
void threadedFreeFloat(Float *);
// bicg_M: Biconjugate gradient method on preconditioned Dirac
// operator (It never converges).
//
// FIXME: test code only, don't use it unless you know what you are
// doing.
int bicg_M(Fermion_t sol, Fermion_t src);
// bicgstab_M: Biconjugate gradient stabilized method on
// preconditioned Dirac operator.
//
// FIXME: test code only, don't use it unless you know what you are
// doing.
int bicgstab_M(Fermion_t sol, Fermion_t src);
// GCR, solves M x = b
int gcr_M(Fermion_t sol, Fermion_t src);
// GMRES(m) solves M x = b.
//
// Restarts after m iterations.
int gmres_M(Fermion_t sol, Fermion_t src, const int m);
public:
//======================================================================
// the following member functions are single-threaded functions:
// ======================================================================
// psi assumes 5D even/odd preconditioned order: (color, spin, x, y, z, t, s)/2
template<typename FloatEXT>
void cps_impexcbFermion(FloatEXT *psi, Fermion_t handle, int doimport, int cb);
// psi assumes regular canonical order: (color, spin, x, y, z, t, s)
template<typename FloatEXT>
void cps_impexFermion(FloatEXT *psi, Fermion_t handle[2], int doimport);
// psi assumes the following order: (color, spin, s, x, y, z, t),
// mainly used to import/export the "v1" and "v2" vectors in evolution.
template<typename FloatEXT>
void cps_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport);
// template<typename FloatEXT>
// void cps_importGauge(FloatEXT *importme);
// Imports a 4D CPS fermion to a 5D BFM fermion, putting the left-handed
// part at s=0 and the right-handed part at s=Ls-1. (Or does the inverse,
// exporting a 5D BFM fermion to a 4D CPS fermion).
// psi assumes regular canonical order: (color, spin, x, y, z, t)
template < typename FloatEXT >
void cps_impexFermion_4d (FloatEXT * psi, Fermion_t handle[2],
int doimport, bool prezero = true)
// Imports a 4D CPS fermion to a 5d BFM fermion, putting the left-handed
// part at s=0 and the right-handed part at s=Ls-1. (Or does the inverse,
// exporting a 5D BFM fermion to a 4D CPS fermion).
//template < class Float > template < typename FloatEXT >
// void bfm_evo < Float >::cps_impexFermion_4d (FloatEXT * psi,
// Fermion_t handle[2],
// int doimport, bool prezero)
{
if (doimport && prezero)
{
#pragma omp parallel
{
// zero out 5d bulk since we only import to the walls
this->set_zero (handle[Even]);
this->set_zero (handle[Odd]);
}
}
int Nspinco = 12;
int i_inc = this->simd () * 2;
int vol4d =
this->node_latt[0] *
this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
omp_set_num_threads (this->nthread);
Float *bagel[2] = { (Float *) handle[0], (Float *) handle[1] };
#pragma omp parallel for
for (int site = 0; site < vol4d; site++)
{
// decode the lexicographic 4D site index (x[0] fastest)
int x[4];
int si = site;
x[0] = si % this->node_latt[0];
si = si / this->node_latt[0];
x[1] = si % this->node_latt[1];
si = si / this->node_latt[1];
x[2] = si % this->node_latt[2];
si = si / this->node_latt[2];
x[3] = si % this->node_latt[3];
int bidx_base_left = this->bagel_idx5d (x, 0, 0, 0, Nspinco, 1);
int bidx_base_right =
this->bagel_idx5d (x, this->Ls - 1, 0, 0, Nspinco, 1);
int cidx_base = this->cps_idx_4d (x, 0, 0, Nspinco);
for (int co = 0; co < Nspinco; co++)
{
// right-handed components are first six spin-color components
// left-handed components are last six spin-color components
int bidx_base;
int s;
if (co < 6)
{
bidx_base = bidx_base_right;
s = this->Ls - 1;
}
else
{
bidx_base = bidx_base_left;
s = 0;
}
int sp = this->precon_5d ? s : 0;
int cb = (x[0] + x[1] + x[2] + x[3] + sp) & 0x1;
for (int reim = 0; reim < 2; reim++)
{
int bidx = bidx_base + reim + co * i_inc;
int cidx = cidx_base + reim + co * 2;
if (doimport)
bagel[cb][bidx] = psi[cidx];
else
psi[cidx] = bagel[cb][bidx];
}
} //co, reim
} //xyzts
}
template < typename FloatEXT > void cps_importGauge (FloatEXT * importme);
#if 0
//CK: Appears to assume 'importme' is in canonical ordering
//template <class Float> template<typename FloatEXT>
//void bfm_evo<Float>::cps_importGauge(FloatEXT *importme)
{
int u_sz = Nd;
if(cps::GJP.Gparity()) u_sz *= 2; //U* fields are stacked on second set of Nd LatticeColorMatrix objects in the array
multi1d<LatticeColorMatrix> U(u_sz);
omp_set_num_threads(this->nthread);
int Ndircoco = 72;
int Ncoco = 9;
QDPdouble *U_p;
int vol4d =
this->node_latt[0] *
this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
assert (vol4d>0 );
for (int muu=0;muu<u_sz;muu++) {
U_p = (QDPdouble *)&(U[muu].elem(0).elem());
int flav = muu / Nd; int mu = muu % Nd;
#pragma omp parallel for
for (int site=0;site<vol4d;site++ ) {
int x[4];
int s=site;
x[0]=s%this->node_latt[0]; s/=this->node_latt[0];
x[1]=s%this->node_latt[1]; s/=this->node_latt[1];
x[2]=s%this->node_latt[2]; s/=this->node_latt[2];
x[3]=s%this->node_latt[3];
int qidx_base = this->chroma_idx(x, 0, 0, Ncoco);
for(int coco = 0; coco < Ncoco; ++coco) {
for ( int reim = 0; reim < 2; ++reim) {
int qidx = qidx_base + reim + coco * 2;
int siteoff = mu + Nd * site + flav*Nd*vol4d; //Second G-parity flavour offset by Nd*vol4d
int cidx = reim + 2 * (coco + Ncoco * siteoff);
U_p[qidx] = importme[cidx];
}} // reim,coco
} // x
}//mu
// if(this->isBoss()) printf("before importGauge\n");
// to bfm
this->importGauge (U);
// if(this->isBoss()) printf("after importGauge\n");
}
#endif
//EigCG
#if 0 //THESE ARE IN BFM
Fermion_t allocCompactFermion (int mem_type=mem_slow);
Fermion_t threadedAllocCompactFermion (int mem_type=mem_slow);
void* threaded_alloc(int length, int mem_type=mem_slow);
void threaded_free(void *handle);
#endif
int EIG_CGNE_M(Fermion_t solution[2], Fermion_t source[2]);
int Eig_CGNE_prec(Fermion_t psi, Fermion_t src);
#if 0 //CK: leaving them in BFM
// copied from Jianglei's bfm
double CompactMprec(Fermion_t compact_psi,
Fermion_t compact_chi,
Fermion_t psi,
Fermion_t chi,
Fermion_t tmp,
int dag,int donrm=0) ;
// copied from Jianglei's bfm
void CompactMunprec(Fermion_t compact_psi[2],
Fermion_t compact_chi[2],
Fermion_t psi[2],
Fermion_t chi[2],
Fermion_t tmp,
int dag);
#endif
// do deflation using eigenvectors/eigenvalues from Rudy's Lanczos code.
void deflate(Fermion_t out, Fermion_t in,
const multi1d<Fermion_t [2]> *evec,
const multi1d<Float> *eval, int N);
void set_mass (double mass);
//#ifdef USE_NEW_BFM_GPARITY
#if 1
// forward to the real/imaginary-component interface of the base class
inline void axpby_ssp_proj(Fermion_t out, std::complex<double> a,Fermion_t x, std::complex<double> b,Fermion_t y,int sxo,int sy,int psign){
this->axpby_ssp_proj_complex(out,a.real(),a.imag(),x,b.real(),b.imag(),y,sxo,sy,psign);
}
#endif
};
// Simple utility function to set the mass and reinit if necessary.
// Set the fermion mass, tearing down and re-initializing the 5D operator
// only when the value actually changes.
template < class Float > void bfm_evo < Float >::set_mass (double mass)
{
if (this->mass == mass)
return;
this->mass = mass;
this->GeneralisedFiveDimEnd ();
this->GeneralisedFiveDimInit ();
}
//CK: this function gives the offset within a checkerboarded vector
// Offset within a checkerboarded (even/odd) vector, CPS layout: the 5D
// lexicographic site index (x[0] fastest, s outermost) is halved to give
// the half-volume site, then interleaved with the internal index and re/im.
template<class Float>
integer bfm_evo<Float>::cps_idx_cb(int x[4], int s, int reim, int i, int i_size)
{
// lexicographic 5D site index
int lex = x[0] + this->node_latt[0]
*(x[1] + this->node_latt[1]
*(x[2] + this->node_latt[2]
*(x[3] + s*this->node_latt[3])));
// half-volume (checkerboarded) site index
int half_site = lex / 2;
return 2*(half_site*i_size + i) + reim;
}
//For G-parity the WILSON layout is 5d preconditioned
//| s=0 | s=1 | ......... | s = 0 |.....
//| odd f0 | odd f1 | even f0 | even f1 | ......... | even f0 | even f1 |.....
//where the blocks on the lowest line have their *4d* parity indicated. (5d parity) = [(4d parity) + s] % 2
//hence the first half of the full WILSON vector had 5d parity odd, and the second half 5d parity even
// G-parity variant of cps_idx_cb: each s-slice holds two 4D half-volumes,
// one per flavour (see the layout comment above).  flav selects the
// flavour block within the s-slice.
template<class Float>
integer bfm_evo<Float>::cps_idx_cb_gparity(int x[4], int s, int reim, int i, int i_size, int flav)
{
int s_off = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; //2 4D half-volumes, one for each flavour
int f_off = s_off/2;
int csite
=x[0] +this->node_latt[0]
*(x[1] + this->node_latt[1]
*(x[2] +this->node_latt[2] * x[3]));
csite /= 2;
csite += flav * f_off + s*s_off;
return csite*i_size*2 + i*2 + reim;
}
// Offset within a full (non-checkerboarded) vector, CPS canonical layout:
// x[0] fastest, then x[1], x[2], x[3], with s outermost.
template<class Float>
integer bfm_evo<Float>::cps_idx(int x[4], int s, int reim, int i, int i_size)
{
int site = x[0] + this->node_latt[0]
*(x[1] + this->node_latt[1]
*(x[2] + this->node_latt[2]
*(x[3] + s*this->node_latt[3])));
return 2*(site*i_size + i) + reim;
}
// G-parity variant of cps_idx: two 4D volumes per s-slice, one per flavour;
// flav selects the flavour block within the slice.
template<class Float>
integer bfm_evo<Float>::cps_idx_gparity(int x[4], int s, int reim, int i, int i_size, int flav)
{
//For G-parity we have 2 flavours on each s-slice
int s_off = 2*this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3]; //2 4D volumes, one for each flavour
int f_off = s_off/2;
int csite =
x[0] + this->node_latt[0]
*(x[1] + this->node_latt[1]
*(x[2] +this->node_latt[2]*x[3]));
csite += s*s_off + flav * f_off;
return (csite*i_size + i)*2 + reim;
}
// Offset with s innermost (but outside color/spin): s runs fastest,
// then x[0]..x[3]; used for the force-vector ("v1"/"v2") layout.
template<class Float>
integer bfm_evo<Float>::cps_idx_s(int x[4], int s, int reim, int i, int i_size)
{
int csite =
s + this->Ls
*(x[0] + this->node_latt[0]
*(x[1] + this->node_latt[1]
*(x[2] +this->node_latt[2]
*x[3])));
return (csite*i_size + i)*2 + reim;
}
// Offset within a 4D fermion field, CPS canonical layout (x[0] fastest),
// interleaving the internal index i and the re/im component.
template < class Float >
integer bfm_evo < Float >::cps_idx_4d (int x[4], int reim, int i,
int i_size)
{
int site = x[0] + this->node_latt[0]
* (x[1] + this->node_latt[1]
* (x[2] + this->node_latt[2] * x[3]));
return 2 * (site * i_size + i) + reim;
}
// G-parity variant of cps_idx_s: the second flavour is offset by one full
// 5D volume, mirroring bfm's internal layout.
template<class Float>
integer bfm_evo<Float>::cps_idx_s_gparity(int x[4], int s, int reim, int i, int i_size, int flav)
{
//This s-inner mapping is new here. Offset the second flavour by 1 5D volume, just like in bfm
int f_off = this->Ls * this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
int csite =
s + this->Ls
*(x[0] + this->node_latt[0]
*(x[1] + this->node_latt[1]
*(x[2] +this->node_latt[2]
*x[3])));
csite += flav * f_off;
return (csite*i_size + i)*2 + reim;
}
//CK: Note if the BFM preconditioning is 4D then the 4D checkerboard of the imported field will be the opposite of the 5D checkerboard of the CPS field! cb is the output checkerboard.
//The set of all sites with x+y+z+t+s odd is the same as the set of sites with x+y+z+t even, and vice versa.
// Import (doimport != 0) or export (doimport == 0) one checkerboard of a
// 5D fermion between a CPS-layout array `psi` and a BFM Fermion_t `handle`.
// `cb` (0 or 1) selects the checkerboard; with 5D preconditioning the
// parity includes the s coordinate, otherwise only x+y+z+t.
template <class Float> template<typename FloatEXT>
void bfm_evo<Float>::cps_impexcbFermion(FloatEXT *psi, Fermion_t handle, int doimport, int cb)
{
int Nspinco=12;
int i_inc = this->simd() * 2;
int vol5d =
this->node_latt[0] *
this->node_latt[1] *
this->node_latt[2] *
this->node_latt[3] *
this->Ls;
Float *bagel = (Float *)handle;
omp_set_num_threads(this->nthread);
// for G-parity there are two flavours to transfer
int work = vol5d;
if(cps::GJP.Gparity()) work*=2;
#pragma omp parallel for
for (int sf = 0; sf < work; sf++) {
int flav = sf;
int site = flav % vol5d; flav /= vol5d;
// decode the lexicographic 5D site index (x[0] fastest, s outermost)
int x[4], s;
int si=site;
x[0]=si%this->node_latt[0]; si=si/this->node_latt[0];
x[1]=si%this->node_latt[1]; si=si/this->node_latt[1];
x[2]=si%this->node_latt[2]; si=si/this->node_latt[2];
x[3]=si%this->node_latt[3];
s =si/this->node_latt[3];
int sp = this->precon_5d ? s : 0;
// BUGFIX: select sites on the requested checkerboard via the parity of
// x+y+z+t(+s), matching cps_impexFermion and cps_impexFermion_4d.
// (was: x[0]+x[1]+x[2]+x[3] + (sp&0x1) == cb, which compares an
// unmasked coordinate sum against a 0/1 checkerboard index and thus
// skips nearly every site)
if ( ((x[0]+x[1]+x[2]+x[3]+sp) & 0x1) == cb ) {
int bidx_base;
int cidx_base;
#ifdef BFM_GPARITY
bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1,flav);
cidx_base = cps::GJP.Gparity() ? this->cps_idx_cb_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx_cb(x, s, 0, 0, Nspinco);
#else
bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1);
cidx_base = this->cps_idx_cb(x, s, 0, 0, Nspinco);
#endif
for ( int co=0;co<Nspinco;co++ ) {
for ( int reim=0;reim<2;reim++ ) {
// int bidx = bagel_idx(x, reim, co + Nspinco * (s / 2), Nspinco * this->cbLs, 1);
// int bidx = this->bagel_idx5d(x, s, reim, co, Nspinco, 1);
// int cidx = cps_idx_cb(x, s, reim, co, Nspinco);
int bidx = bidx_base + reim + co * i_inc;
int cidx = cidx_base + reim + co * 2;
if ( doimport ) bagel[bidx] = psi[cidx];
else psi[cidx] = bagel[bidx] ;
}}//co,reim
}//cb
}//xyzts
}
//Convert a bfm-style Fermion_t pair to or from a CANONICAL format CPS-style fermion
//if doimport == 0 psi is the output and handle the input
//if doimport == 1 handle is the output and psi the input
template <class Float> template<typename FloatEXT>
void bfm_evo<Float>::cps_impexFermion(FloatEXT *psi, Fermion_t handle[2], int doimport)
{
  int Nspinco=12;                      // 4 spin x 3 colour components
  int i_inc = this->simd() * 2;        // bagel index stride per spin-colour component
  int vol5d =
    this->node_latt[0] *
    this->node_latt[1] *
    this->node_latt[2] *
    this->node_latt[3] *
    this->Ls;
  omp_set_num_threads(this->nthread);
  Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] };
  int work = vol5d;
  if(cps::GJP.Gparity()) work*=2;      // two stacked flavours for G-parity
#pragma omp parallel for
  for (int sf = 0; sf < work; sf++) {
    int flav = sf;
    int site = flav % vol5d; flav /= vol5d;
    int x[4], s;
    int si=site;
    // canonical ordering: x fastest, then y, z, t; s slowest
    x[0]=si%this->node_latt[0]; si=si/this->node_latt[0];
    x[1]=si%this->node_latt[1]; si=si/this->node_latt[1];
    x[2]=si%this->node_latt[2]; si=si/this->node_latt[2];
    x[3]=si%this->node_latt[3];
    s   =si/this->node_latt[3];
    int sp = this->precon_5d ? s : 0;
    // Parenthesized for clarity: '+' binds tighter than '&' so the old
    // unparenthesized form computed the same value but read ambiguously
    // (cf. CERT EXP00-C).
    int cb = (x[0]+x[1]+x[2]+x[3]+sp) & 0x1;
    int bidx_base;
    int cidx_base;
#ifdef BFM_GPARITY
    bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1, flav);
    cidx_base = cps::GJP.Gparity() ? this->cps_idx_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx(x, s, 0, 0, Nspinco);
#else
    bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1);
    cidx_base = this->cps_idx(x, s, 0, 0, Nspinco);
#endif
    for ( int co=0;co<Nspinco;co++ ) {
      for ( int reim=0;reim<2;reim++ ) {
        int bidx = bidx_base + reim + co * i_inc;
        int cidx = cidx_base + reim + co * 2;
        // cb selects which of the two checkerboarded bagel vectors this
        // site's data lives on
        if ( doimport ) bagel[cb][bidx] = psi[cidx];
        else psi[cidx] = bagel[cb][bidx];
      }}//co, reim
  }//xyzts
}
//Convert a bfm style Fermion_t pair (left,right) to or from an 's-ordered' CPS fermion.
//doimport == 0: handle is the input and psi the output.
//doimport == 1: psi is the input and handle the output.
template <class Float> template<typename FloatEXT>
void bfm_evo<Float>::cps_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport)
{
  const int nspinco = 12;                 // 4 spin x 3 colour components
  const int binc = this->simd() * 2;      // bagel index stride per spin-colour component
  const int vol5d = this->node_latt[0] * this->node_latt[1]
                  * this->node_latt[2] * this->node_latt[3] * this->Ls;
  omp_set_num_threads(this->nthread);
  Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] };
  // One work unit per 5d site; doubled when two G-parity flavours are stacked.
  int nwork = vol5d;
  if(cps::GJP.Gparity()) nwork *= 2;
#pragma omp parallel for
  for (int w = 0; w < nwork; w++) {
    const int flav = w / vol5d;
    int rem = w % vol5d;
    // s-ordered decode: s runs fastest, then x, y, z, t.
    int x[4];
    const int s = rem % this->Ls;      rem /= this->Ls;
    x[0] = rem % this->node_latt[0];   rem /= this->node_latt[0];
    x[1] = rem % this->node_latt[1];   rem /= this->node_latt[1];
    x[2] = rem % this->node_latt[2];
    x[3] = rem / this->node_latt[2];
    // 4d preconditioning ignores s when computing the checkerboard.
    const int sp = this->precon_5d ? s : 0;
    const int cb = (x[0] + x[1] + x[2] + x[3] + sp) & 0x1;
    int bidx_base;
    int cidx_base;
#ifdef BFM_GPARITY
    bidx_base = this->bagel_idx5d(x, s, 0, 0, nspinco, 1, flav);
    cidx_base = cps::GJP.Gparity() ? this->cps_idx_s_gparity(x, s, 0, 0, nspinco, flav) : this->cps_idx_s(x, s, 0, 0, nspinco);
#else
    bidx_base = this->bagel_idx5d(x, s, 0, 0, nspinco, 1);
    cidx_base = this->cps_idx_s(x, s, 0, 0, nspinco);
#endif
    for (int co = 0; co < nspinco; co++) {
      for (int reim = 0; reim < 2; reim++) {
        const int bidx = bidx_base + reim + co * binc;
        const int cidx = cidx_base + reim + co * 2;
        if (doimport) bagel[cb][bidx] = psi[cidx];
        else          psi[cidx] = bagel[cb][bidx];
      }
    } //co, reim
  } //xyzts
}
#if 0
//This is check code
//Thread-level (non-OpenMP) cross-check variant of cps_impexFermion_s; disabled.
template <class Float> template<typename FloatEXT>
void bfm_evo<Float>::thread_impexFermion_s_test(FloatEXT *psi, Fermion_t handle[2], int doimport)
{
  int Nspinco=12;
  int i_inc = this->simd() * 2;
  int vol5d =
    this->node_latt[0] *
    this->node_latt[1] *
    this->node_latt[2] *
    this->node_latt[3] *
    this->Ls;
  int me, thrlen, throff;
  int work = vol5d;
  if(cps::GJP.Gparity()) work*=2;
  this->thread_work(work, me, thrlen, throff);
  Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] };
  // BUGFIX: the loop variable was previously also named 'site', colliding
  // with the 'int site' declared two lines below; that redeclaration would
  // fail to compile if this block were ever enabled.  Renamed to 'sf' to
  // match the live thread_impexFermion_s.
  for (int sf = 0; sf < thrlen; ++sf) {
    int flav = sf + throff;
    int site = flav % vol5d; flav /= vol5d;
    int x[4], s;
    int si=site;
    // s-ordered decode: s fastest, then x, y, z, t
    s =si%this->Ls; si=si/this->Ls;
    x[0]=si%this->node_latt[0]; si=si/this->node_latt[0];
    x[1]=si%this->node_latt[1]; si=si/this->node_latt[1];
    x[2]=si%this->node_latt[2];
    x[3]=si/this->node_latt[2];
    int sp = this->precon_5d ? s : 0;
    // parenthesized for clarity ('+' binds tighter than '&')
    int cb = (x[0]+x[1]+x[2]+x[3]+sp) & 0x1;
    int bidx_base;
    int cidx_base;
    if(cps::GJP.Gparity()){
      bidx_base = this->bagel_gparity_idx5d(x, s, 0, 0, Nspinco, 1, flav);
      cidx_base = this->cps_idx_s_gparity(x, s, 0, 0, Nspinco, flav);
    }else{
      bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1);
      cidx_base = this->cps_idx_s(x, s, 0, 0, Nspinco);
    }
    for ( int co=0;co<Nspinco;co++ ) {
      for ( int reim=0;reim<2;reim++ ) {
        int bidx = bidx_base + reim + co * i_inc;
        int cidx = cidx_base + reim + co * 2;
        if ( doimport ) bagel[cb][bidx] = psi[cidx];
        else psi[cidx] = bagel[cb][bidx];
      }}//co, reim
  }//xyzts
}
#endif
//Convert a bfm style Fermion_t pair (left,right) to a 's-ordered' fermion
//if doimport == 0 the input is handle and the output psi
//if doimport == 1 the input is psi and the output handle
//Thread-level variant: work is divided via thread_work rather than OpenMP,
//so this must be called by every thread of the bfm thread group.
template <class Float> template<typename FloatEXT>
void bfm_evo<Float>::thread_impexFermion_s(FloatEXT *psi, Fermion_t handle[2], int doimport)
{
  int Nspinco=12;                      // 4 spin x 3 colour components
  int i_inc = this->simd() * 2;        // bagel index stride per spin-colour component
  int vol5d =
    this->node_latt[0] *
    this->node_latt[1] *
    this->node_latt[2] *
    this->node_latt[3] *
    this->Ls;
  int me, thrlen, throff;
  int work = vol5d;
  if(cps::GJP.Gparity()) work*=2;      // two stacked flavours for G-parity
  this->thread_work(work, me, thrlen, throff);
  Float *bagel[2] = { (Float *)handle[0], (Float *)handle[1] };
  for (int sf = 0; sf < thrlen; ++sf) {
    int flav = sf + throff;
    int site = flav % vol5d; flav /= vol5d;
    int x[4], s;
    int si=site;
    // s-ordered decode: s fastest, then x, y, z, t
    s =si%this->Ls; si=si/this->Ls;
    x[0]=si%this->node_latt[0]; si=si/this->node_latt[0];
    x[1]=si%this->node_latt[1]; si=si/this->node_latt[1];
    x[2]=si%this->node_latt[2];
    x[3]=si/this->node_latt[2];
    int sp = this->precon_5d ? s : 0;
    // Parenthesized for clarity: '+' binds tighter than '&', so the old
    // unparenthesized form computed the same value but read ambiguously.
    int cb = (x[0]+x[1]+x[2]+x[3]+sp) & 0x1;
    int bidx_base;
    int cidx_base;
#ifdef BFM_GPARITY
    bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1, flav);
    cidx_base = cps::GJP.Gparity() ? this->cps_idx_s_gparity(x, s, 0, 0, Nspinco, flav) : this->cps_idx_s(x, s, 0, 0, Nspinco);
#else
    bidx_base = this->bagel_idx5d(x, s, 0, 0, Nspinco, 1);
    cidx_base = this->cps_idx_s(x, s, 0, 0, Nspinco);
#endif
    for ( int co=0;co<Nspinco;co++ ) {
      for ( int reim=0;reim<2;reim++ ) {
        int bidx = bidx_base + reim + co * i_inc;
        int cidx = cidx_base + reim + co * 2;
        if ( doimport ) bagel[cb][bidx] = psi[cidx];
        else psi[cidx] = bagel[cb][bidx];
      }}//co, reim
  }//xyzts
}
// Allocate a shared buffer of 'size' Floats from within a threaded region.
// Only thread 0 performs the allocation; the resulting pointer is broadcast
// to the other threads.  Must be called by all threads of the group.
template <class Float>
Float * bfm_evo<Float>::threadedAllocFloat(size_t size, int mem_type)
{
  int me = this->thread_barrier();
  // BUGFIX: initialise 'ret' — previously every non-boss thread passed an
  // indeterminate pointer value to thread_bcast (read of an uninitialized
  // variable).
  void *ret = NULL;
  if ( me == 0 ) {
    ret = bfm_alloc(size * sizeof(Float), mem_type);
  }
  ret = this->thread_bcast(me, ret);
  this->thread_barrier();
  return (Float *)ret;
}
// Release a buffer previously obtained from threadedAllocFloat().  All
// threads must call this; only thread 0 actually frees, with barriers on
// either side so no thread can still be touching the memory.
template <class Float>
void bfm_evo<Float>::threadedFreeFloat(Float *f)
{
  const int tid = this->thread_barrier();
  if (tid == 0) {
    bfm_free(f);
  }
  this->thread_barrier();
}
// Lexicographic rank of 4d coordinate x within a box of extents lx,
// with x[0] varying fastest.
static inline int idx_4d(const int x[4], const int lx[4]) {
  int idx = 0, stride = 1;
  for (int d = 0; d < 4; ++d) {
    idx += x[d] * stride;
    stride *= lx[d];
  }
  return idx;
}
// Lexicographic rank of 5d coordinate x within a box of extents lx,
// with x[0] varying fastest.
static inline int idx_5d(const int x[5], const int lx[5]) {
  int idx = 0, stride = 1;
  for (int d = 0; d < 5; ++d) {
    idx += x[d] * stride;
    stride *= lx[d];
  }
  return idx;
}
// Rank of 4d coordinate x on the 3d surface orthogonal to direction mu:
// same lexicographic ordering as idx_4d but with dimension mu omitted.
static inline int idx_4d_surf(const int x[4], const int lx[4], int mu) {
  int idx = 0, stride = 1;
  for (int d = 0; d < 4; ++d) {
    if (d == mu) continue;
    idx += x[d] * stride;
    stride *= lx[d];
  }
  return idx;
}
// Rank of 5d coordinate x on the 4d surface orthogonal to direction mu:
// same lexicographic ordering as idx_5d but with dimension mu omitted.
static inline int idx_5d_surf(const int x[5], const int lx[5], int mu) {
  int idx = 0, stride = 1;
  for (int d = 0; d < 5; ++d) {
    if (d == mu) continue;
    idx += x[d] * stride;
    stride *= lx[d];
  }
  return idx;
}
//CK: Appears to assume 'importme' is in canonical ordering
// Repack a CPS gauge field into QDP LatticeColorMatrix objects and hand
// them to bfm's importGauge().  For G-parity the U* links of the second
// flavour are stacked as a second set of Nd matrices.
template <class Float> template<typename FloatEXT>
void bfm_evo<Float>::cps_importGauge(FloatEXT *importme)
{
  int u_sz = Nd;
  if(cps::GJP.Gparity()) u_sz *= 2; //U* fields are stacked on second set of Nd LatticeColorMatrix objects in the array
  multi1d<LatticeColorMatrix> U(u_sz);
  omp_set_num_threads(this->nthread);
  // NOTE: removed unused local 'Ndircoco' (= 72) that was never referenced.
  int Ncoco = 9;                       // 3x3 complex colour matrix entries
  QDPdouble *U_p;
  int vol4d =
    this->node_latt[0] *
    this->node_latt[1] *
    this->node_latt[2] *
    this->node_latt[3];
  for (int muu=0;muu<u_sz;muu++) {
    U_p = (QDPdouble *)&(U[muu].elem(0).elem());
    int flav = muu / Nd; int mu = muu % Nd;
#pragma omp parallel for
    for (int site=0;site<vol4d;site++ ) {
      int x[4];
      int s=site;
      // canonical ordering: x fastest, then y, z, t
      x[0]=s%this->node_latt[0]; s/=this->node_latt[0];
      x[1]=s%this->node_latt[1]; s/=this->node_latt[1];
      x[2]=s%this->node_latt[2]; s/=this->node_latt[2];
      x[3]=s%this->node_latt[3];
      int qidx_base = this->chroma_idx(x, 0, 0, Ncoco);
      for(int coco = 0; coco < Ncoco; ++coco) {
        for ( int reim = 0; reim < 2; ++reim) {
          int qidx = qidx_base + reim + coco * 2;
          int siteoff = mu + Nd * site + flav*Nd*vol4d; //Second G-parity flavour offset by Nd*vol4d
          int cidx = reim + 2 * (coco + Ncoco * siteoff);
          U_p[qidx] = importme[cidx];
        }} // reim,coco
    } // x
  }//mu
  // to bfm
  this->importGauge(U);
}
//CK: phi1 = Mprec phi2 for fermionic vectors.
//Calculates: (odd,even)
//v2 = (Boo phi2, Bee MeeInv Meo phi2)
//v1 = (phi1, MeeInv^dag Meo^dag phi1)
// Build the even/odd vector pairs needed by the MD fermion force.
// v1[2]/v2[2] are outputs (indexed by Even/Odd); phi1/phi2 are inputs.
// NOTE(review): the order of the calls below matters — v1[Odd] is reused
// as temporary storage before being written with its final value.
template <class Float>
void bfm_evo<Float>::calcMDForceVecs(Fermion_t v1[2], Fermion_t v2[2],
Fermion_t phi1, Fermion_t phi2)
{
// Meo is Wilson D times a matrix (see page 27 in Peter's draft).
// Moe/Meo: check bfmbase<Float>::G5D_Meo() in bfmdperp.C.
// Mee/Moo: check bfmbase<Float>::G5D_Mooee().
// Mee/Moo inverse: check bfmbase<Float>::G5D_MooeeInv().
//2kappa = 1/(5-M5)
// v2e = Bee * 2kappa * Meo phi2
this->Meo(phi2, v1[Odd], Even, DaggerNo); //Uses v1[Odd] as temp storage
this->MooeeInv(v1[Odd], v1[Even], DaggerNo);
this->Booee(v1[Even], v2[Even], DaggerNo);
// v2o = Boo phi2
this->Booee(phi2, v2[Odd], DaggerNo);
// v1e = 2kappa Meo^dag phi1
this->Meo(phi1, v1[Odd], Even, DaggerYes);
this->MooeeInv(v1[Odd], v1[Even], DaggerYes);
//CK: For WilsonTM, comparison to CPS version
//MooeeInv = 2 kappa g5theta(ctheta,-stheta)
//kappa = 1/[2 sqrt( (m+4)^2 + eps^2 )]
//ctheta = 2 (m+4) kappa
//stheta = 2 eps kappa
//g5theta(ctheta,stheta) = ctheta + i stheta g5
// v1o = 1oo phi1
this->copy(v1[Odd], phi1);
}
// Apply the even-even/odd-odd 'B' factor of the 5d operator to psi,
// writing the result to chi.  dag selects B or B^dag.  For the Cayley-form
// solvers the 5d matrix is assembled explicitly from the beo/ceo
// coefficients; for 5d-preconditioned DWF and 4d-preconditioned WilsonTM
// it degenerates to the identity (a plain copy).  Unsupported
// solver/preconditioning combinations abort.
template <class Float>
void bfm_evo<Float>::Booee(Fermion_t psi, Fermion_t chi, int dag)
{
int Pminus=-1;
int Pplus=1;
// just copied the relevant part in G5D_Meo() over.
if ( (this->solver == HmCayleyTanh)
|| (this->solver == HtCayleyTanh)
|| (this->solver == HwCayleyTanh)
|| (this->solver == HwCayleyZolo)
|| (this->solver == HtCayleyZolo)
) {
if ( dag ) {
// Assemble the 5d matrix
// Dagger case: the off-diagonal couplings run in the opposite s
// direction and the projectors are swapped relative to the no-dagger case.
for(int s=0;s<this->Ls;s++){
if ( s==0 ) {
this->axpby_ssp_proj(chi,this->beo[s],psi, -this->ceo[s+1] ,psi,s,s+1,Pplus);
this->axpby_ssp_proj(chi, 1.0,chi,this->mass*this->ceo[this->Ls-1],psi,s,this->Ls-1,Pminus);
} else if ( s==(this->Ls-1)) {
this->axpby_ssp_proj(chi,this->beo[s],psi,this->mass*this->ceo[0],psi,s,0,Pplus);
this->axpby_ssp_proj(chi,1.0,chi,-this->ceo[s-1],psi,s,s-1,Pminus);
} else {
this->axpby_ssp_proj(chi,this->beo[s],psi,-this->ceo[s+1],psi,s,s+1,Pplus);
this->axpby_ssp_proj(chi,1.0 ,chi,-this->ceo[s-1],psi,s,s-1,Pminus);
}
}
} else {
// Assemble the 5d matrix
// The s=0 and s=Ls-1 rows wrap around the fifth dimension with a -mass
// coupling (chiral boundary condition).
for(int s=0;s<this->Ls;s++){
if ( s==0 ) {
// chi = bs psi[s] + cs[s] psi[s+1}
// chi += -mass*cs[s] psi[s+1}
this->axpby_ssp_proj(chi,this->beo[s],psi,-this->ceo[s],psi ,s, s+1,Pminus);
this->axpby_ssp_proj(chi,1.0,chi,this->mass*this->ceo[s],psi,s,this->Ls-1,Pplus);
} else if ( s==(this->Ls-1)) {
this->axpby_ssp_proj(chi,this->beo[s],psi,this->mass*this->ceo[s],psi,s,0,Pminus);
this->axpby_ssp_proj(chi,1.0,chi,-this->ceo[s],psi,s,s-1,Pplus);
} else {
this->axpby_ssp_proj(chi,this->beo[s],psi,-this->ceo[s],psi,s,s+1,Pminus);
this->axpby_ssp_proj(chi,1.0,chi,-this->ceo[s],psi,s,s-1,Pplus);
}
}
}
} else if(this->solver == DWF && this->precon_5d == 1) {
// Booee is the identity matrix in this case.
this->copy(chi, psi);
return;
} else if(this->solver == WilsonTM && this->precon_5d ==0){ //CK: I hope this is correct
this->copy(chi, psi);
return;
} else {
if ( this->isBoss() ) {
printf("Booee: method not implemented for this fermion type / preconditioning type\n");
}
exit(-1);
}
}
// Minimise the ratio of two quadratic forms
//   N(t) = a cos^2 t + b sin t cos t + c sin^2 t
//   D(t) = d cos^2 t + e sin t cos t + f sin^2 t
// over the angle t.  The stationary points satisfy
//   p + q cos(2t) + r sin(2t) = 0,
// which is solved in closed form via half-angle decomposition; both
// candidate roots are evaluated and the one giving the smaller ratio is
// returned (value), with *ct = cos t and *st = sin t.
static inline double quad_solve(double *ct, double *st,
double a, double b, double c,
double d, double e, double f)
{
  double p = b * (d - f) + e * (c - a);
  double q = b * (d + f) - e * (c + a);
  double r = 2 * (c * d - a * f);
  // solve p + q * cos(2t) + r * sin(2t) = 0
  double norm = sqrt(q * q + r * r);
  double c2a = q / norm;                 // cos of auxiliary angle 2*alpha
  double cos_a = sqrt(0.5 * (1 + c2a));
  double sin_a = sqrt(0.5 * (1 - c2a));
  if(r < 0) sin_a = -sin_a;
  double c2b = -p / norm;                // cos of auxiliary angle 2*beta
  if(fabs(c2b) > 1.) {
    printf("Panic: cos(psi) > 1\n");
    exit(-1);
  }
  double cos_b = sqrt(0.5 * (1 + c2b));
  double sin_b = sqrt(0.5 * (1 - c2b));
  // The two candidate solutions t = alpha -/+ beta.
  double cand_c[2], cand_s[2], cand_v[2];
  cand_c[0] = cos_a * cos_b + sin_a * sin_b;
  cand_s[0] = sin_a * cos_b - cos_a * sin_b;
  cand_c[1] = cos_a * cos_b - sin_a * sin_b;
  cand_s[1] = sin_a * cos_b + cos_a * sin_b;
  for (int k = 0; k < 2; ++k) {
    double cc = cand_c[k], ss = cand_s[k];
    cand_v[k] =
      (a * cc * cc + b * ss * cc + c * ss * ss)
      / (d * cc * cc + e * ss * cc + f * ss * ss);
  }
  int best = cand_v[0] < cand_v[1] ? 0 : 1;
  *ct = cand_c[best];
  *st = cand_s[best];
  return cand_v[best];
}
// Ritz method used to compute the maximum/minimum eigenvalue of M^\dag M.
// Use algorithm presented in arXiv: hep-lat/9507023.
// On entry x is the starting guess; on exit x holds the (approximately
// normalized) extremal eigenvector and the return value is the Rayleigh
// quotient estimate of the eigenvalue.  compute_min != 0 finds the minimum
// eigenvalue; otherwise the maximum is found by minimizing over -MdagM.
// Must be called by all threads of the bfm thread group (uses threaded
// allocation and barriers); only the boss thread prints.
template <class Float>
double bfm_evo<Float>::ritz(Fermion_t x, int compute_min)
{
int me = this->thread_barrier();
// convergence target: squared gradient norm below residual^2
double stop_rsd = this->residual * this->residual;
Fermion_t y = this->threadedAllocFermion();
Fermion_t p = this->threadedAllocFermion();
Fermion_t z = this->threadedAllocFermion();
Fermion_t t = this->threadedAllocFermion();
Fermion_t u = this->threadedAllocFermion();
double mu, pnorm, gnorm2;
// normalize x
double fact = this->norm(x);
fact = sqrt(1./ fact);
this->scale(x, fact);
if(this->isBoss() && !me) {
printf("bfm_evo::ritz <x, x> = %17.10e\n", 1. / (fact * fact));
}
// y = A x, A = MdagM or -MdagM
mu = this->Mprec(x, t, y, 0, 1); // t = Mpc x (y temp)
this->Mprec(t, y, u, 1); //y = Mpc^dag t (u temp)
if(! compute_min) {
// maximization: flip the sign so we always minimize
this->scale(y, -1.); //y=-y
mu = -mu;
}
// initial gradient g = y - mu*x, stored in p (also the first search direction)
gnorm2 = this->axpy_norm(p, x, y, -mu); //p = -mu * x + y
pnorm = sqrt(gnorm2);
int i;
for(i = 0; i < this->max_iter; ++i) {
if(this->isBoss() && !me && i%100==0) {
printf("bfm_evo::ritz iter = %6d gnorm2 = %17.10e, targ gnorm2 = %17.10e, mu = %17.10e\n", i, gnorm2, stop_rsd, mu);
}
if(gnorm2 < stop_rsd) break;
// if(i % 100 == 0 && this->isBoss() && !me) {
//   printf("bfm_evo::ritz iter = %6d gnorm2 = %17.10e, mu = %17.10e\n", i, gnorm2, mu);
// }
// z = A p
double pap = this->Mprec(p, t, z, 0, 1);
this->Mprec(t, z, u, 1);
if(! compute_min) {
this->scale(z, -1.);
pap = -pap;
}
// minimize x cos(theta) + p / pnorm * sin(theta) via theta
// (a,b,c)/(d,e,f) are the quadratic-form coefficients of the Rayleigh
// quotient along the 2d subspace spanned by x and p/pnorm
double d = this->norm(x);
double e = 2. * this->inner_real(x, p) / pnorm;
double f = 1.;
// double a = this->inner_real(x, y);
double a = mu * d;
double b = 2. * this->inner_real(x, z) / pnorm;
double c = pap / (pnorm * pnorm);
double ct,st;
mu = quad_solve(&ct, &st, a, b, c, d, e, f);
// rotate the iterate and its image: x,y <- combinations of (x,p),(y,z)
this->axpby(x, x, p, ct, st / pnorm);
this->axpby(y, y, z, ct, st / pnorm);
double gnew = this->axpy_norm(t, x, y, -mu); // t = new gradient
double beta = ct * gnew / gnorm2;            // CG-style direction update factor
gnorm2 = gnew;
// this->axpy(u, x, p, -st * pnorm); // ! not stable
// orthogonalize p against x before forming the new search direction
double xpp = this->inner_real(x, p);
this->axpy(u, x, p, -xpp);
pnorm = sqrt(this->axpy_norm(p, u, t, beta));
}
if(! compute_min) mu = -mu;
// check eigenvalue again
double xnorm = this->norm(x);
double mux = this->Mprec(x, y, t, 0, 1);
this->Mprec(y, t, u, 1);
double mu_sq = this->norm(t);
if(this->isBoss() && !me) {
if(i < this->max_iter) {
printf("bfm_evo::ritz converged at iteration %d.\n", i);
} else {
printf("bfm_evo::ritz maximum iteration number reached!\n");
}
printf("bfm_evo::ritz ||x|| = %17.10e\n", sqrt(xnorm));
printf("bfm_evo::ritz three ways of computing the eigenvalue should agree.\n");
printf("bfm_evo::ritz eig1 = %17.10e\n", mu / xnorm);
printf("bfm_evo::ritz eig2 = %17.10e\n", mux / xnorm);
printf("bfm_evo::ritz eig3 = %17.10e\n", sqrt(mu_sq / xnorm));
}
this->threadedFreeFermion(y);
this->threadedFreeFermion(p);
this->threadedFreeFermion(z);
this->threadedFreeFermion(t);
this->threadedFreeFermion(u);
return mu / xnorm;
}
// FIXME: I'll need to replace getPlusData by something else.
// For now it works.
#include <comms/scu.h>
// Pack the boundary slice of the 4d+s volume 'v4d' in direction mu into the
// contiguous send buffer 'v3d'.  send_neg picks the lower (true) or upper
// (false) boundary slice.  For G-parity the two flavours are packed one
// after the other, and the flavours are swapped when the slice crosses the
// global G-parity boundary (the flavour twist).
template <class Float>
void bfm_evo<Float>::copySendFrmData(Float v3d[], Float v4d[], int mu, bool send_neg)
{
  int lclx[5] = {this->node_latt[0],
                 this->node_latt[1],
                 this->node_latt[2],
                 this->node_latt[3],
                 this->Ls};
  int low[4] = { 0, 0, 0, 0 };
  int high[4] = {lclx[0], lclx[1], lclx[2], lclx[3] };
  low[mu] = send_neg ? 0 : lclx[mu] - 1; //pick out the slice at the boundary in the send direction
  high[mu] = low[mu] + 1;
  int block_size = 24 * lclx[4]; // s inner most
  const int hl[4] = {high[0] - low[0],
                     high[1] - low[1],
                     high[2] - low[2],
                     high[3] - low[3] };
  const int hl_sites = hl[0] * hl[1] * hl[2] * hl[3]; //3-volume on surface (hl[mu]=1) [in units of blocks of size 24*Ls]
  const int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
  int me, thrlen, throff;
  int work = hl_sites; if(cps::GJP.Gparity()) work *=2;
  this->thread_work(work, me, thrlen, throff);
  for(int i = 0; i < thrlen; ++i) {
    int x[4], flav;
    int tmp = i + throff;
    //For G-parity, fermion data blocks [each of size 24*Ls] increment in x,y,z,t,flav
    //Use similar mapping for surface volume, with flav changing slowest (offset 1 * surface 3-volume blocks)
    flav = tmp/hl_sites; tmp = tmp % hl_sites;
    x[0] = tmp % hl[0] + low[0]; tmp /= hl[0];
    x[1] = tmp % hl[1] + low[1]; tmp /= hl[1];
    x[2] = tmp % hl[2] + low[2]; tmp /= hl[2];
    x[3] = tmp % hl[3] + low[3];
    int off_4d = idx_4d(x, lclx);
    int off_3d = idx_4d_surf(x, lclx, mu);
    if(cps::GJP.Gparity()){
      //Implement G-parity flavour twist where appropriate. Note that the boundary sign on the boundary between C \bar{u}^T and d fields is implemented on the gauge links
      //here so we do not need to explicitly apply it to the communicated data.
      //BUGFIX: parenthesized the condition.  '&&' binds tighter than '||', so
      //the previous form applied the twist on the upper node boundary of
      //EVERY direction regardless of cps::GJP.Bc(mu) — the '!send_neg'
      //disjunct bypassed the BND_CND_GPARITY check entirely.
      if(cps::GJP.Bc(mu) == cps::BND_CND_GPARITY &&
         ( (send_neg && cps::GJP.NodeCoor(mu) == 0) || (!send_neg && cps::GJP.NodeCoor(mu) == cps::GJP.Nodes(mu)-1) ) ){
        if(flav==0) memcpy(v3d + off_3d * block_size + hl_sites * block_size, v4d + off_4d * block_size, sizeof(Float) * block_size); //d -> CubarT buf
        else memcpy(v3d + off_3d * block_size, v4d + off_4d * block_size + vol4d * block_size, sizeof(Float) * block_size); //CubarT -> d buf
      }else{ //copy both flavours to their respective buffers
        memcpy(v3d + off_3d * block_size, v4d + off_4d * block_size, sizeof(Float) * block_size); //d -> d
        memcpy(v3d + off_3d * block_size + hl_sites * block_size, v4d + off_4d * block_size + vol4d * block_size, sizeof(Float) * block_size); //CubarT -> CubarT
      }
    }else{
      memcpy(v3d + off_3d * block_size,
             v4d + off_4d * block_size,
             sizeof(Float) * block_size);
    }
  }
}
// Calculate fermion force on a specific site, also do the
// summation over s direction.
//
// FIXME: need to add a line sum in s direction to support splitting
// in s direction.
//CK: v1p = v1[x+mu]
// fermion vectors appear to be in CANONICAL ordering
// Accumulate the fermion force contribution of one site into mom[mu],
// summing over the s direction.  v1p/v2p are the vectors at the neighbour
// site x+mu (possibly living in a comms buffer; gpf1_offset_p is the
// second-flavour offset appropriate to wherever they live).  For G-parity
// the second flavour's force is computed from the stacked (U*, sign-fixed)
// gauge field and both momentum copies are updated (mom[f1] = mom[f0]*).
// NOTE: removed the per-site printf debug instrumentation that was left in
// here — it printed many lines for EVERY lattice site on every force
// evaluation, which is unusable in production; the computation is unchanged.
template<class Float>
void bfm_evo<Float>::fforce_site(Float *mom, Float *gauge,
                                 Float *v1, Float *v1p,
                                 Float *v2, Float *v2p, int mu, Float coef,int gpf1_offset_p)
{
  Float t1[18], t2[18];
  // spin-projected colour traces for the hopping term in direction mu
  switch(mu) {
  case 0:
    bfm_evo_aux::sprojTrXm(t1, v1p, v2, this->Ls, 0, 0);
    bfm_evo_aux::sprojTrXp(t2, v2p, v1, this->Ls, 0, 0);
    break;
  case 1:
    bfm_evo_aux::sprojTrYm(t1, v1p, v2, this->Ls, 0, 0);
    bfm_evo_aux::sprojTrYp(t2, v2p, v1, this->Ls, 0, 0);
    break;
  case 2:
    bfm_evo_aux::sprojTrZm(t1, v1p, v2, this->Ls, 0, 0);
    bfm_evo_aux::sprojTrZp(t2, v2p, v1, this->Ls, 0, 0);
    break;
  default:
    bfm_evo_aux::sprojTrTm(t1, v1p, v2, this->Ls, 0, 0);
    bfm_evo_aux::sprojTrTp(t2, v2p, v1, this->Ls, 0, 0);
  }
  bfm_evo_aux::su3_add(t1, t2);            //t1 -> t1 + t2
  bfm_evo_aux::mDotMEqual(t2, gauge, t1);  //t2 -> gauge * t1
  bfm_evo_aux::trless_am(t2, -coef);       //traceless anti-hermitian part, scaled
  if(cps::GJP.Gparity1fX()) for(int i=0;i<18;i++) t2[i]*=2.0; //double latt testing, not production code
  bfm_evo_aux::su3_add(mom, t2);
  if(cps::GJP.Gparity()){
    //add force from second flavour
    Float t1_f1[18], t2_f1[18];
    const int vol4d = this->node_latt[0] * this->node_latt[1] * this->node_latt[2] * this->node_latt[3];
    const int f1_off = 24*this->Ls * vol4d; //f1 offset by 5d volume in this ordering scheme
    v1+=f1_off; v2+=f1_off;
    v1p+=gpf1_offset_p; v2p+=gpf1_offset_p; //offset for 'plus' site depends on whether the data is stored in the buffer or the on-node vector
    Float *gauge_f1 = gauge + vol4d*18*4;
    Float *mom_f1 = mom + vol4d*18*4;
    // note the projector/vector roles are swapped relative to flavour 0
    switch(mu) {
    case 0:
      bfm_evo_aux::sprojTrXp(t1_f1, v1, v2p, this->Ls, 0, 0);
      bfm_evo_aux::sprojTrXm(t2_f1, v2, v1p, this->Ls, 0, 0);
      break;
    case 1:
      bfm_evo_aux::sprojTrYp(t1_f1, v1, v2p, this->Ls, 0, 0);
      bfm_evo_aux::sprojTrYm(t2_f1, v2, v1p, this->Ls, 0, 0);
      break;
    case 2:
      bfm_evo_aux::sprojTrZp(t1_f1, v1, v2p, this->Ls, 0, 0);
      bfm_evo_aux::sprojTrZm(t2_f1, v2, v1p, this->Ls, 0, 0);
      break;
    default:
      bfm_evo_aux::sprojTrTp(t1_f1, v1, v2p, this->Ls, 0, 0);
      bfm_evo_aux::sprojTrTm(t2_f1, v2, v1p, this->Ls, 0, 0);
    }
    bfm_evo_aux::su3_add(t1_f1, t2_f1); //t1_f1 -> t1_f1 + t2_f1
    //set it up to use the f1 gauge field (sign*U*), such that the boundary sign comes free
    //this will need to be complex conjugated
    bfm_evo_aux::mStarDotMTransEqual(t2_f1, gauge_f1, t1_f1); // do (U*)* t^T
    bfm_evo_aux::trless_am(t2_f1, -coef);
    bfm_evo_aux::su3_add(mom, t2_f1);
    //setup momentum for the second flavour
    for(int i=1;i<18;i+=2){ t2[i]*=-1; t2_f1[i]*=-1; } //mom[f1] is mom[f0]*
    bfm_evo_aux::su3_add(mom_f1, t2);
    bfm_evo_aux::su3_add(mom_f1, t2_f1);
  }
}
// Accumulate the fermion force for all sites whose x+mu neighbour is on
// this node (the boundary slice in direction mu is excluded; it is handled
// by fforce_surface after communication).  Exactly 'nthreads' threads must
// execute this, with 0 <= me < nthreads.
// NOTE: removed the per-site coordinate printf debug block that previously
// ran here — in 2f G-parity production runs it printed a line for every
// lattice site on every force evaluation; the computation is unchanged.
template<class Float>
void bfm_evo<Float>::fforce_internal(Float *mom, Float *gauge,
                                     Float *v1, Float *v2, // internal data
                                     Float coef, int mu,
                                     int me, int nthreads)
{
  int lclx[5] = {this->node_latt[0],
                 this->node_latt[1],
                 this->node_latt[2],
                 this->node_latt[3],
                 this->Ls};
  int low[4] = { 0, 0, 0, 0 };
  int high[4] = { lclx[0], lclx[1], lclx[2], lclx[3] };
  --high[mu]; //exclude the site on the boundary
  int block_size = 24 * lclx[4];
  const int hl[4] = {high[0] - low[0],
                     high[1] - low[1],
                     high[2] - low[2],
                     high[3] - low[3] };
  const int hl_sites = hl[0] * hl[1] * hl[2] * hl[3];
  const int gparity_vp_off = block_size * lclx[0] * lclx[1] * lclx[2] * lclx[3]; //offset of second flavour (not used when G-parity is off)
  // note: some of the threads are dedicated to communication. There
  // must be exactly *nthreads* threads executing this function, the
  // variable *me* must range from 0 to nthreads - 1, inclusive.
  int thrlen, throff;
  this->thread_work_partial_nobarrier(hl_sites, me, nthreads,
                                      thrlen, throff);
  for(int i = 0; i < thrlen; ++i) {
    int x[4];
    int tmp = i + throff;
    x[0] = tmp % hl[0] + low[0]; tmp /= hl[0];
    x[1] = tmp % hl[1] + low[1]; tmp /= hl[1];
    x[2] = tmp % hl[2] + low[2]; tmp /= hl[2];
    x[3] = tmp % hl[3] + low[3];
    int off_4d = idx_4d(x, lclx);
    int gid = mu + 4 * off_4d;         // gauge/momentum matrix index
    int fid = block_size * off_4d;     // fermion block at x
    int y[4] = {x[0], x[1], x[2], x[3]};
    ++y[mu];
    int fidp = block_size * idx_4d(y, lclx); // fermion block at x+mu (on node)
    //Note fforce_site computes the force on this site from both flavours in the case of G-parity BCs
    this->fforce_site(mom + 18 * gid, gauge + 18 * gid,
                      v2 + fid, v2 + fidp,
                      v1 + fid, v1 + fidp, mu, coef, gparity_vp_off);
  }
  //GPARITY TESTING: COMPARE 1F AND 2F METHODS (NOT USED IN PRODUCTION CODE)
  if(cps::GJP.Gparity1fX() && me==0){ //use only first thread for this (does not need to be fast as it is only testing)
    // NOTE(review): thread_barrier() is called here by thread 0 only, while
    // the other threads do not reach it — looks like a potential deadlock;
    // confirm thread_barrier semantics before enabling this testing path.
    this->thread_barrier();
    printf("Patching up 1f G-parity force\n");
    //want p_0' = p_0 + delta p_0 + cconj(delta p_1)
    //     p_1' = p_1 + delta p_1 + cconj(delta p_0)
    //we did p_i' = p_i + 2 * delta p_i
    //and we know p_1 = cconj(p_0)
    //so we now do p_0' = 0.5* p_0' + 0.5* cconj(p_1')
    //so we now do p_1' = 0.5* p_1' + 0.5* cconj(p_0')
    //to fix this
    int momsz = 4*18*cps::GJP.VolNodeSites();
    Float *buf = (Float *)bfm_alloc(momsz * sizeof(Float) );
    for(int ii=0;ii<momsz;ii++) buf[ii] = 0.0;
    //Communicate \delta p from first half onto second half and vice versa
    Float *data_buf = mom;
    Float *send_buf = data_buf;
    Float *recv_buf = buf;
    if(cps::GJP.Xnodes()>1){
      //pass between nodes
      for(int i=0;i<cps::GJP.Xnodes()/2;i++){
        cps::getMinusData((Float *)recv_buf, (Float *)send_buf, momsz , 0);
        data_buf = recv_buf;
        recv_buf = send_buf;
        send_buf = data_buf;
      }
    }else{
      //shift mom[mu] field by xsites/2
      for(long i=0;i<cps::GJP.VolNodeSites();i++){
        //i = (x + Lx*(y+Ly*(z+Lz*t) ) )
        int x = i % cps::GJP.XnodeSites();
        int pos_rem = i/cps::GJP.XnodeSites(); //(y+Ly*(z+Lz*t)
        int x_from = (x + cps::GJP.XnodeSites()/2) % cps::GJP.XnodeSites();
        int i_from = 18*mu + 18*4*(x_from + cps::GJP.XnodeSites()*pos_rem);
        int i_to = 18*mu + 18*4*i;
        for(int j=0;j<18;j++) buf[i_to+j] = mom[i_from+j];
      }
      data_buf = buf;
    }
    for(int i=0;i<cps::GJP.VolNodeSites();i++){ //do fixup step
      int mat_off = 18*mu + 18*4*i;
      for(int j=0;j<18;j++){
        if(j%2==0) mom[mat_off+j] = mom[mat_off+j]/2.0 + data_buf[mat_off+j]/2.0;
        else mom[mat_off+j] = mom[mat_off+j]/2.0 - data_buf[mat_off+j]/2.0;
      }
    }
    bfm_free(buf);
  }
}
// Accumulate the fermion force for the sites on the upper boundary slice in
// direction mu, whose x+mu neighbours arrived in the comms buffers
// v1_s/v2_s (packed by copySendFrmData on the neighbouring node).
// Complements fforce_internal, which handles all other sites.
template<class Float>
void bfm_evo<Float>::fforce_surface(Float *mom, Float *gauge,
Float *v1, Float *v2, // internal data
Float *v1_s, Float *v2_s, // surface data
Float coef, int mu)
{
int lclx[5] = {this->node_latt[0],
this->node_latt[1],
this->node_latt[2],
this->node_latt[3],
this->Ls};
int low[4] = { 0, 0, 0, 0 };
int high[4] = { lclx[0], lclx[1], lclx[2], lclx[3] };
// restrict the loop to the single slice at the upper boundary in mu
low[mu] = lclx[mu] - 1;
int block_size = 24 * lclx[4];
const int hl[4] = {high[0] - low[0],
high[1] - low[1],
high[2] - low[2],
high[3] - low[3] };
int hl_sites = hl[0] * hl[1] * hl[2] * hl[3];
int me, thrlen, throff;
this->thread_work(hl_sites, me, thrlen, throff);
const int gparity_vp_off = block_size * hl_sites; //offset of second flavour (not used when G-parity is off)
for(int i = 0; i < thrlen; ++i) {
int x[4];
int tmp = i + throff;
x[0] = tmp % hl[0] + low[0]; tmp /= hl[0];
x[1] = tmp % hl[1] + low[1]; tmp /= hl[1];
x[2] = tmp % hl[2] + low[2]; tmp /= hl[2];
x[3] = tmp % hl[3] + low[3];
int off_4d = idx_4d(x, lclx);
int gid = mu + 4 * off_4d;                       // gauge/momentum matrix index
int fid = block_size * off_4d;                   // on-node fermion block at x
int fid_s = block_size * idx_4d_surf(x, lclx, mu); // neighbour's block in the comms buffer
this->fforce_site(mom + 18 * gid, gauge + 18 * gid,
v2 + fid, v2_s + fid_s,
v1 + fid, v1_s + fid_s, mu, coef,gparity_vp_off);
}
//GPARITY TESTING: COMPARE 1F AND 2F METHODS (NOT USED IN PRODUCTION CODE)
if(cps::GJP.Gparity1fX() && me==0){ //use only first thread for this (does not need to be fast as it is only testing)
// NOTE(review): thread_barrier() is reached by thread 0 only here —
// confirm thread_barrier semantics before enabling this testing path.
this->thread_barrier();
printf("Patching up 1f G-parity force\n");
//want p_0' = p_0 + delta p_0 + cconj(delta p_1)
//     p_1' = p_1 + delta p_1 + cconj(delta p_0)
//we did p_i' = p_i + 2 * delta p_i
//and we know p_1 = cconj(p_0)
//so we now do p_0' = 0.5* p_0' + 0.5* cconj(p_1')
//so we now do p_1' = 0.5* p_1' + 0.5* cconj(p_0')
//to fix this
int momsz = 4*18*cps::GJP.VolNodeSites();
Float *buf = (Float *)bfm_alloc(momsz * sizeof(Float) );
for(int ii=0;ii<momsz;ii++) buf[ii] = 0.0;
//Communicate \delta p from first half onto second half and vice versa
Float *data_buf = mom;
Float *send_buf = data_buf;
Float *recv_buf = buf;
if(cps::GJP.Xnodes()>1){
//pass between nodes
for(int i=0;i<cps::GJP.Xnodes()/2;i++){
cps::getMinusData((Float *)recv_buf, (Float *)send_buf, momsz , 0);
data_buf = recv_buf;
recv_buf = send_buf;
send_buf = data_buf;
}
}else{
//shift mom[mu] field by xsites/2
for(long i=0;i<cps::GJP.VolNodeSites();i++){
//i = (x + Lx*(y+Ly*(z+Lz*t) ) )
int x = i % cps::GJP.XnodeSites();
int pos_rem = i/cps::GJP.XnodeSites(); //(y+Ly*(z+Lz*t)
int x_from = (x + cps::GJP.XnodeSites()/2) % cps::GJP.XnodeSites();
int i_from = 18*mu + 18*4*(x_from + cps::GJP.XnodeSites()*pos_rem);
int i_to = 18*mu + 18*4*i;
for(int j=0;j<18;j++) buf[i_to+j] = mom[i_from+j];
}
data_buf = buf;
}
for(int i=0;i<cps::GJP.VolNodeSites();i++){ //do fixup step
int mat_off = 18*mu + 18*4*i;
for(int j=0;j<18;j++){
if(j%2==0) mom[mat_off+j] = mom[mat_off+j]/2.0 + data_buf[mat_off+j]/2.0;
else mom[mat_off+j] = mom[mat_off+j]/2.0 - data_buf[mat_off+j]/2.0;
}
}
bfm_free(buf);
}
}
// compute fermion force for Mobius class fermions:
// This is the threaded equivalent of fbfm::EvolveMemFforceBase() in CPS.
//
// mom += coef * (phiL^\dag e_i(M) \phiR + \phiR^\dag e_i(M^\dag) \phiL)
// M = M_oo - M_oe M^{-1}_ee M_eo
//
// IMPORTANT: at least 5 threads are needed for this function to work
// correctly since we want to interleave communication and the
// evaluation of internal forces.
// Accumulate the Mobius fermion force into the gauge momentum:
//   mom += coef * (phiL^dag e_i(M) phiR + phiR^dag e_i(M^dag) phiL)
// Bulk ("internal") contributions and inter-node surface contributions are
// handled separately so that surface communication can overlap with the
// internal force computation when enough threads are available.
template<class Float>
void bfm_evo<Float>::compute_force(Float *mom, Float *gauge, Fermion_t phiL, Fermion_t phiR, double coef)
{
int me = this->thread_barrier();
// v1/v2: the two MD force vectors (even/odd checkerboards each).
Fermion_t v1[2] = {this->threadedAllocFermion(), this->threadedAllocFermion()};
Fermion_t v2[2] = {this->threadedAllocFermion(), this->threadedAllocFermion()};
this->calcMDForceVecs(v1, v2, phiL, phiR);
// compute various sizes
// 24 = 12 complex components (4 spin x 3 color) per 5d site.
int lclx[5] = {this->node_latt[0], this->node_latt[1], this->node_latt[2], this->node_latt[3], this->Ls};
int vol_5d = 24 * lclx[0] * lclx[1] * lclx[2] * lclx[3] * lclx[4];
int surf_size[4];
int surf_size_all = 0;
for(int i = 0; i < 4; ++i) {
// surface in direction i = one 4d-slice of the 5d volume
surf_size[i] = vol_5d / lclx[i];
if(cps::GJP.Gparity()) surf_size[i] *=2; //2 flavours
surf_size_all += 2 * surf_size[i];
}
// calculate offset of surface vectors v1 and v2
// layout of snd/rcv buffers: [v1_x | v2_x | v1_y | v2_y | ...]
int surf_v1[4], surf_v2[4];
surf_v1[0] = 0;
surf_v2[0] = surf_size[0];
for(int i = 1; i < 4; ++i) {
surf_v1[i] = surf_v1[i-1] + surf_size[i-1] * 2;
surf_v2[i] = surf_v1[i] + surf_size[i];
}
int fsize = vol_5d; if(cps::GJP.Gparity()) fsize*=2;
// flat (canonical-layout) copies of v1/v2 for surface extraction
Float *v1f = this->threadedAllocFloat(fsize);
Float *v2f = this->threadedAllocFloat(fsize);
Float *sndbuf = this->threadedAllocFloat(surf_size_all);
Float *rcvbuf = this->threadedAllocFloat(surf_size_all);
this->thread_impexFermion_s(v1f, v1, 0);
this->thread_impexFermion_s(v2f, v2, 0);
// pack the boundary slices for all 4 directions
for(int i = 0; i < 4; ++i) {
this->copySendFrmData(sndbuf + surf_v1[i], v1f, i, true);
this->copySendFrmData(sndbuf + surf_v2[i], v2f, i, true);
}
if(this->nthread <= 4) {
//#define DROPOUT_LT5THREADS
#ifdef DROPOUT_LT5THREADS
if(!me) {
printf("compute_force: Oops, at least 5 threads are needed.\n");
}
exit(-1);
#else
//CK: We can do it with less than 5 threads, but less efficiently (so this will work on a cluster/laptop)
if(me==0){ //Do comms on single thread
for(int dir=0; dir<4; dir++)
cps::getPlusData(rcvbuf + surf_v1[dir], sndbuf + surf_v1[dir],
surf_size[dir] * 2, dir);
}
for(int i = 0; i < 4; ++i) {
fforce_internal(mom, gauge, v1f, v2f, coef, i, me, this->nthread); //run over however many threads we have
}
#endif
}else{
// Fused comm/internal force.
//
// The last 4 threads (typically 60-63) are used for
// communication. All other threads (typically 0-59) are used to
// calculate internal forces.
// parallelize comm/internal force calculation
if(me >= this->nthread - 4) {
// me in [nthread-4, nthread-1] maps to dir 3..0, one thread per direction
int dir = this->nthread - me - 1;
cps::getPlusData(rcvbuf + surf_v1[dir], sndbuf + surf_v1[dir],
surf_size[dir] * 2, dir);
} else {
for(int i = 0; i < 4; ++i) {
fforce_internal(mom, gauge, v1f, v2f, coef, i, me, this->nthread - 4);
}
}
}
// wait for both comms and internal forces before touching surfaces
this->thread_barrier();
for(int i = 0; i < 4; ++i) {
fforce_surface(mom, gauge, v1f, v2f,
rcvbuf + surf_v1[i], // v1 surface
rcvbuf + surf_v2[i], // v2 surface
coef, i);
}
this->threadedFreeFloat(v1f);
this->threadedFreeFloat(v2f);
this->threadedFreeFloat(sndbuf);
this->threadedFreeFloat(rcvbuf);
this->threadedFreeFermion(v1[0]);
this->threadedFreeFermion(v1[1]);
this->threadedFreeFermion(v2[0]);
this->threadedFreeFermion(v2[1]);
}
// complex version of axpy()
// template<class Float>
// void bfm_evo<Float>::axpy_c(Fermion_t r, Fermion_t x, Fermion_t y, std::complex<double> a, Fermion_t tmp)
// {
// this->copy(tmp, x);
// this->scale(tmp, std::real(a), std::imag(a));
// this->axpy(r, tmp, y, 1.0);
// }
// bicg_M: Biconjugate gradient method on preconditioned Dirac
// operator (It never converges).
//
// FIXME: test code only, don't use it unless you know what you are
// doing.
// Biconjugate gradient on the preconditioned operator M. Runs the primal
// system (M x = b) and the dual system (M^dag xd = b) side by side, using
// the dual residual as the shadow vector. Marked above as test code that
// never converges -- keep for experimentation only.
// Returns the number of iterations performed.
template<class Float>
int bfm_evo<Float>::bicg_M(Fermion_t sol, Fermion_t src)
{
int me = this->thread_barrier();
Fermion_t r = this->threadedAllocFermion();   // primal residual
Fermion_t rd = this->threadedAllocFermion();  // dual residual
Fermion_t p = this->threadedAllocFermion();   // primal search direction
Fermion_t pd = this->threadedAllocFermion();  // dual search direction
Fermion_t mp = this->threadedAllocFermion();  // M p
Fermion_t mdpd= this->threadedAllocFermion(); // M^dag pd
Fermion_t x = sol;
Fermion_t xd = this->threadedAllocFermion();
this->copy(xd, x);
Fermion_t tv1 = this->threadedAllocFermion();
Fermion_t tv2 = this->threadedAllocFermion();
// target: |r|^2 < |b|^2 * residual^2
const double src_norm = this->norm(src);
const double stop = src_norm * this->residual * this->residual;
this->Mprec(x , r , tv1, 0, 0);
this->Mprec(xd, rd, tv1, 1, 0);
double rnorm = this->axpy_norm(r , r , src, -1.0); // r0 <- b-M*x0
double rdnorm = this->axpy_norm(rd, rd, src, -1.0);// r0d <- b-Md*x0
if ( this->isBoss() && !me ) {
printf("iter = %5d rsd = %17.10e true rsd = %17.10e\n", 0, rnorm, rnorm);
}
this->copy(p, r);
this->copy(pd, rd);
// rddr = <rd, r>, the BiCG bilinear form driving alpha/beta
std::complex<double> rddr = this->inner(rd, r);
int k = 1;
for(; k <= this->max_iter; ++k) {
this->Mprec(p, mp, tv1, 0, 0);
this->Mprec(pd, mdpd, tv1, 1, 0);
std::complex<double> pddmp = this->inner(pd, mp);
std::complex<double> alpha = rddr / pddmp;
this->axpy_c(x , p , x , alpha, tv1); // x <- x + alpha * p
this->axpy_c(xd, pd, xd, alpha, tv1); // xd <- xd + alpha * pd
this->axpy_c(r , mp , r , -alpha, tv1); // r <- r - alpha * Mp
this->axpy_c(rd, mdpd, rd, -alpha, tv1); // rd <- rd - alpha * Mdpd
rnorm = this->norm(r);
rdnorm = this->norm(rd);
// check stopping condition
if(rnorm < stop) {
// compute true residual
this->Mprec(x, tv2, tv1, 0, 0);
double true_rsd = this->axpy_norm(tv1, tv2, src, -1.0);
if(this->isBoss() && !me) {
printf("bicg_M: converged in %d iterations.\n", k);
printf("bicg_M: acc_rsd = %9.3e %9.3e true_rsd = %9.3e\n",
sqrt(rnorm/src_norm), sqrt(rdnorm/src_norm), sqrt(true_rsd/src_norm));
}
break;
}
// beta = <rd_new, r_new> / <rd_old, r_old>
std::complex<double> tmp = this->inner(rd, r);
std::complex<double> beta = tmp / rddr;
rddr = tmp;
this->axpy_c(p , p , r , beta, tv1); // p <- r + beta * p
this->axpy_c(pd, pd, rd, beta, tv1); // pd <- rd + beta * pd
// ======================================================================
// compare rsd and true rsd (diagnostic only; removable without
// affecting convergence)
this->Mprec(x, tv2, tv1, 0, 0);
double true_rsd = this->axpy_norm(tv2, tv2, src, -1.0);
if ( this->isBoss() && !me ) {
printf("iter = %5d rsd = %9.3e true rsd = %9.3e a = (%9.3e %9.3e) b = (%9.3e %9.3e)\n",
k, rnorm, true_rsd, real(alpha), imag(alpha), real(beta), imag(beta));
}
// ======================================================================
}
if(k > this->max_iter) {
if(this->isBoss() && !me) {
printf("bicg_M: not converged in %d iterations.\n", k);
}
}
this->threadedFreeFermion(r);
this->threadedFreeFermion(rd);
this->threadedFreeFermion(p);
this->threadedFreeFermion(pd);
this->threadedFreeFermion(mp);
this->threadedFreeFermion(mdpd);
this->threadedFreeFermion(xd);
this->threadedFreeFermion(tv1);
this->threadedFreeFermion(tv2);
return k;
}
// bicgstab_M: Biconjugate gradient stabilized method on
// preconditioned Dirac operator.
//
// FIXME: test code only, don't use it unless you know what you are
// doing.
// BiCGSTAB on the preconditioned operator M, in the standard formulation
// (shadow residual r0, scalars rho/alpha/omega). The stopping test uses the
// explicitly recomputed true residual each iteration, not the recursive one.
// Marked above as test code. Returns the number of iterations performed.
template<class Float>
int bfm_evo<Float>::bicgstab_M(Fermion_t sol, Fermion_t src)
{
int me = this->thread_barrier();
Fermion_t r0 = this->threadedAllocFermion();  // fixed shadow residual r0^hat
Fermion_t r = this->threadedAllocFermion();
Fermion_t p = this->threadedAllocFermion();
Fermion_t v = this->threadedAllocFermion();   // v = M p
Fermion_t s = this->threadedAllocFermion();
Fermion_t t = this->threadedAllocFermion();   // t = M s
Fermion_t x = sol;
Fermion_t tv1 = this->threadedAllocFermion();
Fermion_t tv2 = this->threadedAllocFermion();
// target: |r|^2 < |b|^2 * residual^2
const double src_norm = this->norm(src);
const double stop = src_norm * this->residual * this->residual;
this->Mprec(x, r0, tv1, 0, 0);
double r0n = this->axpy_norm(r0, r0, src, -1.0); // r0 <- b-M*x0, r0^hat = r0
this->copy(r, r0);
if ( this->isBoss() && !me ) {
printf("iter = %5d rsd = %17.10e true rsd = %17.10e\n", 0, r0n, r0n);
}
std::complex<double> rho(1, 0);
std::complex<double> alpha(1, 0);
std::complex<double> omega(1, 0);
this->set_zero(v);
this->set_zero(p);
int k = 1;
for(; k <= this->max_iter; ++k) {
std::complex<double> rho_k = this->inner(r0, r);
std::complex<double> beta = rho_k / rho * alpha / omega;
rho = rho_k;
// p <- r + beta * (p - omega * v)
this->axpy_c(tv1, v, p, -omega, tv2);
this->axpy_c(p, tv1, r, beta, tv2);
this->Mprec(p, v, tv1, 0, 0);
alpha = rho / this->inner(r0, v);
this->axpy_c(s, v, r, -alpha, tv1);   // s <- r - alpha * v
this->Mprec(s, t, tv1, 0, 0);
// omega minimizes |s - omega * t|
omega = this->inner(t, s) / this->norm(t);
this->axpy_c(x, p, x, alpha, tv1);    // x <- x + alpha * p
this->axpy_c(x, s, x, omega, tv1);    // x <- x + omega * s
this->axpy_c(r, t, s, -omega, tv1);   // r <- s - omega * t
// compute true residual
this->Mprec(x, tv2, tv1, 0, 0);
double true_rsd = this->axpy_norm(tv1, tv2, src, -1.0);
// check stopping condition
if(true_rsd < stop) {
if(this->isBoss() && !me) {
printf("bicgstab_M: converged in %d iterations.\n", k);
printf("bicgstab_M: true_rsd = %10.3e\n", sqrt(true_rsd/src_norm));
}
break;
}
// ======================================================================
// debug information
if ( this->isBoss() && !me ) {
printf("iter = %5d true rsd = %10.3e "
"rho = (%10.3e %10.3e) alpha = (%10.3e %10.3e) omega = (%10.3e %10.3e)\n",
k, true_rsd,
real(rho), imag(rho),
real(alpha), imag(alpha),
real(omega), imag(omega));
}
// ======================================================================
}
if(k > this->max_iter) {
if(this->isBoss() && !me) {
printf("bicgstab_M: not converged in %d iterations.\n", k);
}
}
this->threadedFreeFermion(r0);
this->threadedFreeFermion(r);
this->threadedFreeFermion(p);
this->threadedFreeFermion(v);
this->threadedFreeFermion(s);
this->threadedFreeFermion(t);
this->threadedFreeFermion(tv1);
this->threadedFreeFermion(tv2);
return k;
}
#if 0 //CK: in BFM, leaving them there!
// copied from Jianglei's bfm
// Mprec wrapper operating on "compact" fermion storage: expands compact_psi
// into psi, applies Mprec, then packs chi back into compact_chi.
// Returns Mprec's result (the norm when donrm is set).
// NOTE: this definition is inside an '#if 0' block and is not compiled.
template<typename Float>
double bfm_evo<Float>::CompactMprec(Fermion_t compact_psi,
Fermion_t compact_chi,
Fermion_t psi,
Fermion_t chi,
Fermion_t tmp,
int dag,int donrm)
{
this->copy(psi, compact_psi);
double result = this->Mprec(psi, chi, tmp, dag, donrm);
this->copy(compact_chi, chi);
return result;
}
// copied from Jianglei's bfm
// Munprec wrapper operating on "compact" two-checkerboard fermion storage:
// expands both checkerboards, applies the unpreconditioned operator, and
// packs the result back.
// NOTE: this definition is inside an '#if 0' block and is not compiled.
template<typename Float>
void bfm_evo<Float>::CompactMunprec(Fermion_t compact_psi[2],
Fermion_t compact_chi[2],
Fermion_t psi[2],
Fermion_t chi[2],
Fermion_t tmp,
int dag)
{
this->copy(psi[0], compact_psi[0]);
this->copy(psi[1], compact_psi[1]);
this->Munprec(psi, chi, tmp, dag);
this->copy(compact_chi[0], chi[0]);
this->copy(compact_chi[1], chi[1]);
}
#endif
// Deflate "in" against the lowest N eigenpairs:
//   out = sum_k v_k <v_k, in> / lambda_k
// i.e. apply the inverse of the operator restricted to the given eigenspace.
// Aborts the program if no eigenvectors/eigenvalues are supplied.
template<typename Float>
void bfm_evo<Float>::deflate(Fermion_t out, Fermion_t in,
const multi1d<Fermion_t [2]> *evec,
const multi1d<Float> *eval,
int N)
{
const bool have_spectrum = (N != 0 && evec != NULL && eval != NULL);
if(!have_spectrum) {
if(this->isBoss()) {
printf("bfm_evo::deflate() must provide eigenvectors.\n");
}
exit(-1);
}
// out <- 0 (axpby with both coefficients zero)
this->axpby(out, in, in, 0., 0.);
for(int k = 0; k < N; ++k) {
Fermion_t vk = (*evec)[k][1];
// projection of the source onto this eigenvector
const std::complex<double> proj = this->inner(vk, in);
const double lambda = double((*eval)[k]);
// out += (proj / lambda) * vk via complex axpy
this->caxpy(out, vk, out, proj.real() / lambda, proj.imag() / lambda);
}
}
// GCR, the matrix is preconditioned M.
// GCR on the preconditioned operator M using the g5r5-twisted residual.
// Currently stubbed out: the active body just prints a message and aborts;
// the full implementation below is disabled with '#if 0'.
template<class Float>
int bfm_evo<Float>::gcr_M(Fermion_t sol, Fermion_t src)
{
printf("int bfm_evo<Float>::gcr_M temporarily disabled");
exit(-1);
#if 0
int me = this->thread_barrier();
Fermion_t r = this->threadedAllocFermion();    // residual
Fermion_t gr = this->threadedAllocFermion();   // g5r5 * r
Fermion_t agr = this->threadedAllocFermion();  // M * g5r5 * r
Fermion_t p = this->threadedAllocFermion();    // search direction
Fermion_t ap = this->threadedAllocFermion();   // M * p
Fermion_t x = sol;
Fermion_t tv1 = this->threadedAllocFermion();
Fermion_t tv2 = this->threadedAllocFermion();
const double src_norm = this->norm(src);
const double stop = src_norm * this->residual * this->residual;
this->Mprec(x, r, tv2, 0, 0);
double rnorm = this->axpy_norm(r, r, src, -1.0); // r <- b - M x
if ( this->isBoss() && !me ) {
std::printf("gcr_M: iter = %5d rsd = %10.3e true rsd = %10.3e\n",
0, std::sqrt(rnorm / src_norm),
std::sqrt(rnorm / src_norm));
}
this->g5r5(gr, r);
this->Mprec(gr, agr, tv1, 0, 0);
this->copy(p, gr);
this->copy(ap, agr);
std::complex<double> ragr = this->inner(r, agr);
int k = 1;
for(; k <= this->max_iter; ++k) {
double pdmmp = this->norm(ap);
std::complex<double> alpha = ragr / pdmmp;
this->zaxpy(x, p, x, alpha);     // x <- x + alpha * p
this->zaxpy(r, ap, r, -alpha);   // r <- r - alpha * Ap
rnorm = this->norm(r);
if(rnorm < stop) {
if(this->isBoss() && !me) {
std::printf("gcr_M: converged in %d iterations.\n", k);
std::printf("gcr_M: rsd = %10.3e\n", std::sqrt(rnorm/src_norm));
}
break;
}
this->g5r5(gr, r);
this->Mprec(gr, agr, tv2, 0, 0);
std::complex<double> ragrn = this->inner(r, agr);
std::complex<double> beta = ragrn / ragr;
ragr = ragrn;
this->zaxpy(p, p, gr, beta);
this->zaxpy(ap, ap, agr, beta);
// ======================================================================
// Computing true residual and other information, the
// following can be removed without any effect on convergence.
this->Mprec(x, tv1, tv2, 0, 0);
double true_rsd = this->axpy_norm(tv1, tv1, src, -1.0);
if ( this->isBoss() && !me ) {
std::printf("gcr_M: iter = %5d rsd = %10.3e true_rsd = %10.3e\n",
k,
std::sqrt(rnorm / src_norm),
std::sqrt(true_rsd / src_norm));
}
// ======================================================================
}
if(k > this->max_iter) {
if(this->isBoss() && !me) {
std::printf("gcr_M: not converged in %d iterations.\n", k);
}
}
this->Mprec(x, tv1, tv2, 0, 0);
double true_rsd = this->axpy_norm(tv1, tv1, src, -1.0);
if(this->isBoss() && !me) {
std::printf("gcr_M: true_rsd = %10.3e\n",
std::sqrt(true_rsd/src_norm));
}
this->threadedFreeFermion(r);
this->threadedFreeFermion(gr);
this->threadedFreeFermion(agr);
this->threadedFreeFermion(p);
this->threadedFreeFermion(ap);
this->threadedFreeFermion(tv1);
this->threadedFreeFermion(tv2);
return k;
#endif
}
// GMRES(m), we restart after m iterations.
// GMRES(m) on the preconditioned operator M, restarting every m iterations.
// Currently stubbed out: the active body just prints a message and aborts;
// the full implementation below (Arnoldi + Givens rotations) is disabled
// with '#if 0'.
template<class Float>
int bfm_evo<Float>::gmres_M(Fermion_t sol, Fermion_t src, const int m)
{
printf("int bfm_evo<Float>::gmres_M temporarily disabled\n");
exit(-1);
#if 0
using namespace std;
typedef complex<double> cmplx;
int me = this->thread_barrier();
Fermion_t r = this->threadedAllocFermion();
Fermion_t w = this->threadedAllocFermion();
Fermion_t tv1 = this->threadedAllocFermion();
// the history of search directions
vector<Fermion_t> v(m + 1, NULL);
for(int i = 0; i <= m; ++i) {
v[i] = this->threadedAllocFermion();
}
// Hessenberg matrix H, its triangularized form R, rhs B, Givens
// cosines/sines C/S, and back-substitution solution Y.
vector<cmplx> H((m + 1) * m, 0);
vector<cmplx> R((m + 1) * m, 0);
vector<cmplx> B(m, 0);
vector<cmplx> C(m, 0);
vector<cmplx> S(m, 0);
vector<cmplx> Y(m, 0);
const double len = sqrt(this->norm(src));
const double stop = len * this->residual;
this->Mprec(sol, r, tv1, 0, 0);
double rsq = this->axpy_norm(r, r, src, -1.0); // r <- b - M x
int j = 0;
for(; j < this->max_iter / m; ++j) {    // outer restart loop
double beta = sqrt(rsq);
this->axpy(v[0], r, r, 1/beta - 1); // v[0] <- r / beta
B.assign(m, 0);
B[0] = beta;
int nr = m;
double rho = len;
for(int i = 0; i < m; ++i) {        // inner Krylov loop
this->Mprec(v[i], w, tv1, 0, 0);
// Arnoldi iteration
for(int k = 0; k <= i; ++k) {
H[k*m+i] = this->inner(v[k], w);
this->zaxpy(w, v[k], w, -H[k*m+i]);
}
double w2 = sqrt(this->norm(w));
H[(i+1)*m+i] = w2;
this->axpy(v[i+1], w, w, 1/w2 - 1); // v[i+1] <- w / |w|
R[0*m+i] = H[0*m+i];
// Givens transformation
for(int k = 1; k <= i; ++k) {
cmplx gamma = C[k-1] * R[(k-1)*m+i] + conj(S[k-1]) * H[k*m+i];
R[k*m+i] = -S[k-1] * R[(k-1)*m+i] + C[k-1] * H[k*m+i];
R[(k-1)*m+i] = gamma;
}
// build the new rotation annihilating H[(i+1)*m+i]
double rii = norm(R[i*m+i]);
double hii = norm(H[(i+1)*m+i]);
double delta = sqrt(rii + hii);
cmplx mu, tau;
if(rii < hii) {
mu = R[i*m+i] / H[(i+1)*m+i];
tau = conj(mu) / abs(mu);
} else {
mu = H[(i+1)*m+i] / R[i*m+i];
tau = mu / abs(mu);
}
C[i] = sqrt(rii) / delta;
S[i] = sqrt(hii) * tau / delta;
R[i*m+i] = C[i] * R[i*m+i] + conj(S[i]) * H[(i+1)*m+i];
B[i+1] = -S[i] * B[i];
B[i] *= C[i];
rho = abs(B[i+1]); // current residual estimate
if(this->isBoss() && !me) {
std::printf("gmres: (j i) = %4d %4d rsd = %10.3e\n",
j, i, rho / len);
}
if(rho < stop) {
nr = i;
break;
}
}
// back substitution: solve R Y = B and update the solution
for(int k = nr - 1; k >= 0; --k) {
Y[k] = B[k];
for(int i = k + 1; i < nr; ++i) {
Y[k] -= R[k*m+i] * Y[i];
}
Y[k] /= R[k*m+k];
this->zaxpy(sol, v[k], sol, Y[k]);
}
// recompute the exact residual before (possibly) restarting
this->Mprec(sol, r, tv1, 0, 0);
rsq = this->axpy_norm(r, r, src, -1.0);
if(rho < stop) break;
}
if(j >= this->max_iter / m) {
if(this->isBoss() && !me) {
std::printf("gmres: not converged in %d iterations.\n", j);
}
}
if(this->isBoss() && !me) {
std::printf("gmres: true_rsd = %10.3e\n",
std::sqrt(rsq) / len);
}
this->threadedFreeFermion(r);
this->threadedFreeFermion(w);
this->threadedFreeFermion(tv1);
for(int i = 0; i <= m; ++i) {
this->threadedFreeFermion(v[i]);
}
return j;
#endif
}
#endif
|
Problem.h | #pragma once
#include "Constraint.h"
#include "Define.h"
#include "LinearSolver.h"
#include "Objective.h"
#ifdef EQLIB_USE_MKL
#include "PardisoLDLT.h"
#endif
#include "ProblemData.h"
#include "Settings.h"
#include "SimplicialLDLT.h"
#include "SparseStructure.h"
#include "Timer.h"
#include <omp.h>
#include <mutex>
#include <set>
#include <tuple>
#include <utility>
#include <vector>
namespace eqlib {
class Problem {
private: // types
using Type = Problem;
using ElementsF = std::vector<Pointer<Objective>>;
using ElementsG = std::vector<Pointer<Constraint>>;
using Equations = std::vector<Pointer<Equation>>;
using Variables = std::vector<Pointer<Variable>>;
// Pairs an element-local index with its global index in the assembled
// system. Sorting is by global index so per-element index lists follow
// the sparse-structure (CSR) ordering.
struct Index {
index local;  // position within the element's variable/equation list
index global; // position within the assembled problem
Index(index local, index global)
: local(local)
, global(global)
{
}
bool operator<(const Index& other) const noexcept
{
return global < other.global;
}
};
private: // variables
double m_sigma;
int m_nb_threads;
int m_grainsize;
ElementsF m_elements_f;
ElementsG m_elements_g;
std::vector<index> m_active_elements_f;
std::vector<index> m_active_elements_g;
Equations m_equations;
Variables m_variables;
DenseMap<Pointer<Equation>, index> m_equation_indices;
DenseMap<Pointer<Variable>, index> m_variable_indices;
std::vector<index> m_element_f_nb_variables;
std::vector<index> m_element_g_nb_variables;
std::vector<index> m_element_g_nb_equations;
index m_max_element_n;
index m_max_element_m;
std::vector<std::vector<Index>> m_element_f_variable_indices;
std::vector<std::vector<Index>> m_element_g_equation_indices;
std::vector<std::vector<Index>> m_element_g_variable_indices;
std::vector<std::vector<index>> m_element_f_variable_indices_hi;
SparseStructure<double, int, true> m_structure_dg;
SparseStructure<double, int, true> m_structure_hm;
ProblemData m_data;
Pointer<LinearSolver> m_linear_solver;
public: // constructors
Problem()
{
}
// Main constructor: takes ownership of the objective elements (f) and
// constraint elements (g), collects the unique active equations and
// variables, assigns them global indices, and builds the sparse patterns
// for the constraint jacobian (dg) and the hessian (hm, upper triangle).
// nb_threads selects the OpenMP thread count (1 = serial), grainsize the
// chunk size for dynamic scheduling.
// NOTE(review): the member-initializer list is not in declaration order
// (m_sigma is declared before m_elements_f) -- initialization follows
// declaration order, which is still safe here, but -Wreorder will warn.
Problem(ElementsF elements_f, ElementsG elements_g, const int nb_threads = 1, const int grainsize = 100)
: m_elements_f(std::move(elements_f))
, m_elements_g(std::move(elements_g))
, m_sigma(1.0)
, m_nb_threads(nb_threads)
, m_grainsize(grainsize)
, m_max_element_n(0)
, m_max_element_m(0)
, m_active_elements_f(length(m_elements_f))
, m_active_elements_g(length(m_elements_g))
{
Log::task_begin("Initialize problem...");
Timer timer;
const auto nb_elements_f = length(m_elements_f);
const auto nb_elements_g = length(m_elements_g);
Log::task_info("The objective consists of {} elements", nb_elements_f);
Log::task_info("The constraints consist of {} elements", nb_elements_g);
Log::task_step("Getting equations and variables...");
// Cache per-element sizes and track the largest element dimensions
// (used later to size the shared computation buffer).
m_element_f_nb_variables.resize(nb_elements_f);
m_element_g_nb_variables.resize(nb_elements_g);
m_element_g_nb_equations.resize(nb_elements_g);
for (index i = 0; i < nb_elements_f; i++) {
const auto& element = *m_elements_f[i];
const index nb_variables = element.nb_variables();
m_element_f_nb_variables[i] = nb_variables;
m_max_element_n = std::max(m_max_element_n, nb_variables);
}
for (index i = 0; i < nb_elements_g; i++) {
const auto& element = *m_elements_g[i];
const index nb_equations = element.nb_equations();
const index nb_variables = element.nb_variables();
m_element_g_nb_variables[i] = nb_variables;
m_element_g_nb_equations[i] = nb_equations;
m_max_element_n = std::max(m_max_element_n, nb_variables);
m_max_element_m = std::max(m_max_element_m, nb_equations);
}
Log::task_step("Creating the set of unique equations...");
// Deduplicate active equations; insertion order defines global indices.
RobinSet<Pointer<Equation>> equation_set;
for (const auto& element : m_elements_g) {
for (const auto& equation : element->equations()) {
if (!equation->is_active()) {
continue;
}
const auto [_, is_new] = equation_set.insert(equation);
if (is_new) {
m_equations.push_back(equation);
}
}
}
Log::task_step("Creating the set of unique variables...");
// Deduplicate active variables from f-elements first, then g-elements.
RobinSet<Pointer<Variable>> variable_set;
for (const auto& element : m_elements_f) {
for (const auto& variable : element->variables()) {
if (!variable->is_active()) {
continue;
}
const auto [_, is_new] = variable_set.insert(variable);
if (is_new) {
m_variables.push_back(variable);
}
}
}
for (const auto& element : m_elements_g) {
for (const auto& variable : element->variables()) {
if (!variable->is_active()) {
continue;
}
const auto [_, is_new] = variable_set.insert(variable);
if (is_new) {
m_variables.push_back(variable);
}
}
}
const auto nb_equations = length(m_equations);
const auto nb_variables = length(m_variables);
Log::task_info("The problem contains {} variables", nb_variables);
Log::task_info("The problem contains {} constraint equations", nb_equations);
Log::task_step("Compute indices for variables and equations...");
// Map each equation/variable pointer to its global index.
m_equation_indices.set_empty_key(nullptr);
m_variable_indices.set_empty_key(nullptr);
m_equation_indices.resize(nb_equations);
m_variable_indices.resize(nb_variables);
for (index i = 0; i < length(m_equations); i++) {
const auto& equation = m_equations[i];
m_equation_indices[equation] = i;
}
for (index i = 0; i < length(m_variables); i++) {
const auto& variable = m_variables[i];
m_variable_indices[variable] = i;
}
Log::task_step("Compute indices for elements...");
// variable indices f
m_element_f_variable_indices.resize(nb_elements_f);
m_element_g_equation_indices.resize(nb_elements_g);
m_element_g_variable_indices.resize(nb_elements_g);
#pragma omp parallel if (m_nb_threads != 1) num_threads(m_nb_threads)
{
// Per-element (local -> global) index lists; variable lists are sorted
// by global index to match the CSR column ordering.
#pragma omp for schedule(dynamic, m_grainsize) nowait
for (index i = 0; i < nb_elements_f; i++) {
const auto& variables = m_elements_f[i]->variables();
std::vector<Index> variable_indices;
variable_indices.reserve(variables.size());
for (index local = 0; local < length(variables); local++) {
const auto& variable = variables[local];
if (!variable->is_active()) {
continue;
}
const auto global = m_variable_indices[variable];
variable_indices.emplace_back(local, global);
}
std::sort(variable_indices.begin(), variable_indices.end());
m_element_f_variable_indices[i] = std::move(variable_indices);
}
// equation indices g
#pragma omp for schedule(dynamic, m_grainsize) nowait
for (index i = 0; i < nb_elements_g; i++) {
const auto& equations = m_elements_g[i]->equations();
std::vector<Index> equation_indices;
equation_indices.reserve(equations.size());
for (index local = 0; local < length(equations); local++) {
const auto& equation = equations[local];
if (!equation->is_active()) {
continue;
}
const auto global = m_equation_indices[equation];
equation_indices.emplace_back(local, global);
}
m_element_g_equation_indices[i] = std::move(equation_indices);
}
// variable indices g
#pragma omp for schedule(dynamic, m_grainsize)
for (index i = 0; i < nb_elements_g; i++) {
const auto& variables = m_elements_g[i]->variables();
std::vector<Index> variable_indices;
variable_indices.reserve(variables.size());
for (index local = 0; local < length(variables); local++) {
const auto& variable = variables[local];
if (!variable->is_active()) {
continue;
}
const auto global = m_variable_indices[variable];
variable_indices.emplace_back(local, global);
}
std::sort(variable_indices.begin(), variable_indices.end());
m_element_g_variable_indices[i] = std::move(variable_indices);
}
}
Log::task_step("Analyse sparse patterns...");
const auto n = length(m_variables);
const auto m = length(m_equations);
// pattern_dg: nonzero columns per constraint row; pattern_hm: nonzero
// columns per hessian row (upper triangle only, since col >= row below).
std::vector<std::vector<index>> pattern_dg(m);
std::vector<std::vector<index>> pattern_hm(n);
std::vector<RobinSet<index>> pattern_dg_set(m);
std::vector<RobinSet<index>> pattern_hm_set(n);
#pragma omp parallel if (m_nb_threads != 1) num_threads(m_nb_threads) shared(pattern_dg, pattern_hm)
{
// Rows are striped over threads in contiguous chunks of `grainsize`
// so that no two threads ever touch the same row's set (lock-free).
// NOTE(review): "threats" below is a typo for "threads" (local name
// only, no behavioral effect).
const auto current_nb_threats = omp_get_num_threads();
const auto thread_id = omp_get_thread_num();
for (index i = 0; i < length(m_elements_f); i++) {
const auto& variable_indices = m_element_f_variable_indices[i];
for (index row_i = 0; row_i < length(variable_indices); row_i++) {
const auto row = variable_indices[row_i];
if ((row.global / grainsize) % current_nb_threats != thread_id) {
continue;
}
// col_i starts at row_i: indices are sorted, so col >= row
// (upper triangle).
for (index col_i = row_i; col_i < length(variable_indices); col_i++) {
const auto col = variable_indices[col_i];
pattern_hm_set[row.global].insert(col.global);
}
}
}
for (index i = 0; i < length(m_elements_g); i++) {
const auto& equation_indices = m_element_g_equation_indices[i];
const auto& variable_indices = m_element_g_variable_indices[i];
for (const auto row : equation_indices) {
if ((row.global / grainsize) % current_nb_threats != thread_id) {
continue;
}
for (const auto col : variable_indices) {
pattern_dg_set[row.global].insert(col.global);
}
}
for (index row_i = 0; row_i < length(variable_indices); row_i++) {
const auto row = variable_indices[row_i];
if ((row.global / grainsize) % current_nb_threats != thread_id) {
continue;
}
for (index col_i = row_i; col_i < length(variable_indices); col_i++) {
const auto col = variable_indices[col_i];
pattern_hm_set[row.global].insert(col.global);
}
}
}
// Convert each row's set into a sorted column list, same striping.
for (index i = 0; i < m; i++) {
if ((i / grainsize) % current_nb_threats != thread_id) {
continue;
}
std::vector<index> tmp;
tmp.reserve(length(pattern_dg_set[i]));
tmp.insert(tmp.end(), pattern_dg_set[i].begin(), pattern_dg_set[i].end());
std::sort(tmp.begin(), tmp.end());
pattern_dg[i] = std::move(tmp);
}
for (index i = 0; i < n; i++) {
if ((i / grainsize) % current_nb_threats != thread_id) {
continue;
}
std::vector<index> tmp;
tmp.reserve(length(pattern_hm_set[i]));
tmp.insert(tmp.end(), pattern_hm_set[i].begin(), pattern_hm_set[i].end());
std::sort(tmp.begin(), tmp.end());
pattern_hm[i] = std::move(tmp);
}
}
Log::task_step("Allocate memory...");
m_structure_dg = SparseStructure<double, int, true>::from_pattern(m, n, pattern_dg);
m_structure_hm = SparseStructure<double, int, true>::from_pattern(n, n, pattern_hm);
Log::task_info("The hessian has {} nonzero entries ({:.3f}%)", m_structure_hm.nb_nonzeros(), m_structure_hm.density() * 100.0);
Log::task_info("The jacobian of the constraints has {} nonzero entries ({:.3f}%)", m_structure_dg.nb_nonzeros(), m_structure_dg.density() * 100.0);
m_data.resize(n, m, m_structure_dg.nb_nonzeros(), m_structure_hm.nb_nonzeros(), m_max_element_n, m_max_element_m);
Log::task_info("The problem occupies {} MB", m_data.values().size() * 8.0 / 1'024 / 1'024);
Log::task_step("Initialize linear solver...");
#ifdef EQLIB_USE_MKL
m_linear_solver = new_<PardisoLDLT>();
#else
m_linear_solver = new_<SimplicialLDLT>();
#endif
Log::task_step("Initialize element boundaries...");
// Precompute, per f-element row, the exclusive upper bound of the CSR
// index of its last column -- used by compute_element_f to restrict the
// bounded index search.
m_element_f_variable_indices_hi.resize(nb_elements_f);
for (index i = 0; i < nb_elements_f; i++)
{
const auto& element_indices = m_element_f_variable_indices[i];
std::vector<index> element_hi(length(element_indices));
for (index row_i = 0; row_i < length(element_indices); row_i++) {
const auto row = element_indices[row_i];
const auto col = element_indices.back();
element_hi[row_i] = m_structure_hm.get_index(row.global, col.global) + 1;
}
m_element_f_variable_indices_hi[i] = std::move(element_hi);
}
Log::task_end("Problem initialized in {:.3f} sec", timer.ellapsed());
}
private: // methods: computation
// Evaluate one objective element and assemble its contributions into
// `data`: f always, df (gradient) for TOrder >= 1, hm (upper-triangular
// hessian) for TOrder >= 2. The g/h views alias data.m_buffer, sized to
// zero when the corresponding order is not requested.
template <index TOrder>
void compute_element_f(ProblemData& data, const index i)
{
static_assert(0 <= TOrder && TOrder <= 2);
const auto& element_f = *m_elements_f[i];
if (!element_f.is_active()) {
return;
}
const auto& variable_indices = m_element_f_variable_indices[i];
if (variable_indices.empty()) {
return;
}
const auto n = m_element_f_nb_variables[i];
index size_g = TOrder > 0 ? n : 0;
index size_h = TOrder > 1 ? n : 0;
Map<Vector> g(data.m_buffer.data(), size_g);
Map<Matrix> h(data.m_buffer.data() + size_g, size_h, size_h);
Timer timer_element_compute;
const double f = element_f.compute(g, h);
data.computation_time() += timer_element_compute.ellapsed();
Timer timer_element_assemble;
data.f() += f;
// Loop conditions double as order guards: `TOrder > 0` / `TOrder > 1`
// are compile-time constants that skip gradient/hessian assembly.
for (index row_i = 0; row_i < length(variable_indices) && TOrder > 0; row_i++) {
const auto row = variable_indices[row_i];
data.df(row.global) += g(row.local);
// Search window [lo, hi) into the CSR row; hi was precomputed in the
// constructor, lo advances monotonically since columns are sorted.
auto lo = m_structure_hm.get_first_index(row.global);
const auto hi = m_element_f_variable_indices_hi[i][row_i];
for (index col_i = row_i; col_i < length(variable_indices) && TOrder > 1; col_i++) {
const auto col = variable_indices[col_i];
// NOTE(review): local name `index` shadows the `index` type here.
const index index = m_structure_hm.get_index_bounded(col.global, lo, hi);
// h is only filled on one triangle; pick the stored orientation.
if (row.local < col.local) {
data.hm_value(index) += h(row.local, col.local);
} else {
data.hm_value(index) += h(col.local, row.local);
}
lo = index;
}
}
data.assemble_time() += timer_element_assemble.ellapsed();
}
// Evaluate one constraint element and assemble its contributions into
// `data`: g (constraint values) always, dg (jacobian) for TOrder >= 1,
// and the multiplier-weighted constraint hessians into hm for TOrder >= 2.
// Per-equation gradient/hessian views alias data.m_buffer.
template <index TOrder>
void compute_element_g(ProblemData& data, const index i)
{
static_assert(0 <= TOrder && TOrder <= 2);
const auto& element_g = *m_elements_g[i];
if (!element_g.is_active()) {
return;
}
const auto& equation_indices = m_element_g_equation_indices[i];
const auto& variable_indices = m_element_g_variable_indices[i];
if (equation_indices.empty() || variable_indices.empty()) {
return;
}
const auto m = m_element_g_nb_equations[i];
const auto n = m_element_g_nb_variables[i];
Timer timer_element_allocate;
Vector fs(m);
// One gradient (n) and one hessian (n x n) view per local equation,
// laid out consecutively in the shared buffer.
std::vector<Ref<Vector>> gs;
std::vector<Ref<Matrix>> hs;
gs.reserve(m);
hs.reserve(m);
for (index k = 0; k < m; k++) {
Map<Vector> g(data.m_buffer.data() + k * n, n);
Map<Matrix> h(data.m_buffer.data() + m * n + k * n * n, n, n);
gs.push_back(g);
hs.push_back(h);
}
Timer timer_element_compute;
element_g.compute(fs, gs, hs);
data.computation_time() += timer_element_compute.ellapsed();
Timer timer_element_assemble;
for (const auto& equation_index : equation_indices) {
const auto& equation = m_equations[equation_index.global];
data.g(equation_index.global) += fs(equation_index.local);
if constexpr (TOrder < 1) {
continue;
}
auto& local_g = gs[equation_index.local];
auto& local_h = hs[equation_index.local];
// Only the hessian is weighted by the Lagrange multiplier; the
// jacobian dg stays unweighted.
local_h *= equation->multiplier();
for (index row_i = 0; row_i < length(variable_indices); row_i++) {
const auto row = variable_indices[row_i];
const index dg_value_i = m_structure_dg.get_index(equation_index.global, row.global);
data.dg_value(dg_value_i) += local_g(row.local);
if constexpr (TOrder < 2) {
continue;
}
// col_i starts at row_i: sorted indices give the upper triangle.
for (index col_i = row_i; col_i < length(variable_indices); col_i++) {
const auto col = variable_indices[col_i];
const index hm_value_i = m_structure_hm.get_index(row.global, col.global);
data.hm_value(hm_value_i) += local_h(row.local, col.local);
}
}
}
data.assemble_time() += timer_element_assemble.ellapsed();
}
public: // methods: computation
void update_active_elements()
{
m_active_elements_f.clear();
for (index i = 0; i < nb_elements_f(); i++) {
if (m_elements_f[i]->is_active()) {
m_active_elements_f.emplace_back(i);
}
}
m_active_elements_g.clear();
for (index i = 0; i < nb_elements_g(); i++) {
if (m_elements_g[i]->is_active()) {
m_active_elements_g.emplace_back(i);
}
}
}
// Evaluate the whole problem up to derivative order TOrder (0: values,
// 1: + gradients/jacobian, 2: + hessian), serially or with OpenMP.
// In the parallel path each thread gets a firstprivate copy of the
// (zeroed) ProblemData and the copies are summed under a critical section.
// The objective is scaled by sigma() before the constraint pass in both
// paths.
template <bool TParallel, bool TInfo, index TOrder>
void compute()
{
static_assert(0 <= TOrder && TOrder <= 2);
if constexpr (TInfo) {
Log::task_begin("Compute problem...");
}
Timer timer;
m_data.set_zero<TOrder>();
update_active_elements();
if constexpr (TParallel) {
// l_data is copied per thread via firstprivate below.
ProblemData l_data(m_data);
#pragma omp parallel if (m_nb_threads != 1) num_threads(m_nb_threads) firstprivate(l_data)
{
#pragma omp for schedule(dynamic, m_grainsize) nowait
for (index i = 0; i < nb_elements_f(); i++) {
compute_element_f<TOrder>(l_data, i);
}
// Scale each thread's objective contribution; constraint terms
// are added afterwards and must not be scaled.
if (sigma() != 1.0) {
l_data.f() *= sigma();
if constexpr (TOrder > 0) {
l_data.df() *= sigma();
}
if constexpr (TOrder > 1) {
l_data.hm() *= sigma();
}
}
#pragma omp for schedule(dynamic, m_grainsize) nowait
for (index i = 0; i < nb_elements_g(); i++) {
compute_element_g<TOrder>(l_data, i);
}
// reduction: accumulate the per-thread results
#pragma omp critical
m_data += l_data;
}
} else {
for (index i = 0; i < nb_elements_f(); i++) {
compute_element_f<TOrder>(m_data, i);
}
if (sigma() != 1.0) {
m_data.f() *= sigma();
if constexpr (TOrder > 0) {
m_data.df() *= sigma();
}
if constexpr (TOrder > 1) {
m_data.hm() *= sigma();
}
}
for (index i = 0; i < nb_elements_g(); i++) {
compute_element_g<TOrder>(m_data, i);
}
}
if constexpr (TInfo) {
Log::task_info("Element computation took {} sec", m_data.computation_time());
Log::task_info("Assembly of the system took {} sec", m_data.assemble_time());
Log::task_end("Problem computed in {:.3f} sec", timer.ellapsed());
}
}
// Dispatch to the serial or parallel implementation depending on the
// configured thread count.
template <bool TInfo, index TOrder>
void compute()
{
const bool run_parallel = (m_nb_threads != 1);
if (run_parallel) {
compute<true, TInfo, TOrder>();
} else {
compute<false, TInfo, TOrder>();
}
}
// Runtime dispatch of the derivative order (0, 1 or 2) to the compile-time
// TOrder parameter.
// Delegates the serial/parallel choice to compute<TInfo, TOrder>() instead
// of duplicating the m_nb_threads switch in both branches as before.
// Throws std::invalid_argument for any other order.
template <bool TInfo>
void compute(const index order = 2)
{
switch (order) {
case 0:
compute<TInfo, 0>();
break;
case 1:
compute<TInfo, 1>();
break;
case 2:
compute<TInfo, 2>();
break;
default:
throw std::invalid_argument("order");
}
}
// Non-template convenience overload: computes with logging enabled
// (TInfo = true). Delegates to compute<true>(order) instead of duplicating
// the order/thread dispatch; behavior (including the std::invalid_argument
// on a bad order) is unchanged.
void compute(const index order = 2)
{
    compute<true>(order);
}
public: // methods
// Solves hm * x = v for x using the configured linear solver
// (factorization followed by a solve on the CSR structure of hm).
// Returns an empty vector for a problem without variables.
// Throws std::runtime_error if the factorization or the solve reports failure.
Vector hm_inv_v(Ref<const Vector> v)
{
if (nb_variables() == 0) {
return Vector(0);
}
// factorize()/solve() return nonzero on failure
if (m_linear_solver->factorize(m_structure_hm.ia(), m_structure_hm.ja(), m_data.hm())) {
throw std::runtime_error("Factorization failed");
}
Vector x(nb_variables());
if (m_linear_solver->solve(m_structure_hm.ia(), m_structure_hm.ja(), m_data.hm(), v, x)) {
throw std::runtime_error("Solve failed");
}
return x;
}
// Matrix-vector product hm * v, expanding the stored upper triangle to the
// full symmetric matrix via selfadjointView<Upper>.
// NOTE(review): the v.transpose() on a column-vector operand looks suspicious;
// verify against callers that the intended product is hm * v.
Vector hm_v(Ref<const Vector> v) const
{
return hm().selfadjointView<Eigen::Upper>() * v.transpose();
}
// The helpers below address hm's diagonal via m_structure_hm.ia(row), i.e.
// the first stored entry of each CSR row. This assumes upper-triangular
// storage with the diagonal entry first in every row -- TODO confirm against
// the structure builder.
// Returns a copy of the diagonal of hm.
Vector hm_diagonal()
{
Vector result(nb_variables());
for (int row = 0; row < nb_variables(); row++) {
const int i = m_structure_hm.ia(row);
result(row) = hm(i);
}
return result;
}
// Overwrites the diagonal of hm with the given values.
void set_hm_diagonal(Eigen::Ref<const Vector> value)
{
for (int row = 0; row < nb_variables(); row++) {
const int i = m_structure_hm.ia(row);
hm(i) = value(row);
}
}
// Adds a scalar to every diagonal entry of hm (e.g. for regularization).
void hm_add_diagonal(const double value)
{
for (int row = 0; row < nb_variables(); row++) {
const int i = m_structure_hm.ia(row);
hm(i) += value;
}
}
double hm_norm_inf() const
{
Vector row_sum = Vector::Zero(nb_variables());
for (int row = 0; row < nb_variables(); row++) {
for (int i = m_structure_hm.ia(row); i < m_structure_hm.ia(row + 1); i++) {
const int col = m_structure_hm.ja(i);
const double abs_value = std::abs(hm(i));
row_sum(row) += abs_value;
if (row != col) {
row_sum(col) += abs_value;
}
}
}
return row_sum.maxCoeff();
}
// Scales all computed values (f, g, df, dg, hm) in place by the given factor.
void scale(const double factor)
{
m_data.values() *= factor;
}
// Returns a copy of this problem with a fresh linear-solver instance, so the
// clone can factorize independently of the original.
Pointer<Problem> clone() const
{
auto new_problem = new_<Problem>(*this);
#ifdef EQLIB_USE_MKL
new_problem->m_linear_solver = new_<PardisoLDLT>();
#else
new_problem->m_linear_solver = new_<SimplicialLDLT>();
#endif
return new_problem;
}
// Human-readable name of the configured linear solver.
std::string solver_name() const
{
return m_linear_solver->solver_name();
}
// Compacts m_elements_f and its per-element bookkeeping vectors, dropping all
// elements whose is_active() is false, and recomputes m_max_element_n.
void remove_inactive_objectives()
{
// first pass: count the active elements to size the compacted containers
index nb_active_elements_f = 0;
for (const auto& element : m_elements_f) {
if (element->is_active()) {
nb_active_elements_f += 1;
}
}
ElementsF elements_f(nb_active_elements_f);
std::vector<index> element_f_nb_variables(nb_active_elements_f);
std::vector<std::vector<Index>> element_f_variable_indices(nb_active_elements_f);
index max_element_n = 0;
// second pass: move the active elements (and their metadata) into the
// compacted containers; j is the write cursor
index j = 0;
for (index i = 0; i < length(m_elements_f); i++) {
if (!m_elements_f[i]->is_active()) {
continue;
}
max_element_n = std::max(max_element_n, m_elements_f[i]->nb_variables());
element_f_nb_variables[j] = m_element_f_nb_variables[i];
element_f_variable_indices[j] = std::move(m_element_f_variable_indices[i]);
elements_f[j] = std::move(m_elements_f[i]);
j += 1;
}
m_max_element_n = max_element_n;
m_element_f_nb_variables = std::move(element_f_nb_variables);
m_element_f_variable_indices = std::move(element_f_variable_indices);
m_elements_f = std::move(elements_f);
}
// Compacts m_elements_g and its per-element bookkeeping vectors, dropping all
// elements whose is_active() is false, and recomputes m_max_element_n/m.
// Mirrors remove_inactive_objectives().
void remove_inactive_constraints()
{
    // first pass: count the active elements to size the compacted containers
    index nb_active_elements_g = 0;
    for (const auto& element : m_elements_g) {
        if (element->is_active()) {
            nb_active_elements_g += 1;
        }
    }
    ElementsG elements_g(nb_active_elements_g);
    std::vector<index> element_g_nb_variables(nb_active_elements_g);
    std::vector<index> element_g_nb_equations(nb_active_elements_g);
    std::vector<std::vector<Index>> element_g_equation_indices(nb_active_elements_g);
    std::vector<std::vector<Index>> element_g_variable_indices(nb_active_elements_g);
    index max_element_n = 0;
    index max_element_m = 0;
    // second pass: move the active elements and their metadata; j is the
    // write cursor into the compacted containers
    index j = 0;
    for (index i = 0; i < length(m_elements_g); i++) {
        if (!m_elements_g[i]->is_active()) {
            continue;
        }
        max_element_n = std::max(max_element_n, m_elements_g[i]->nb_variables());
        max_element_m = std::max(max_element_m, m_elements_g[i]->nb_equations());
        // FIX: read from the member containers (m_element_g_*), not from the
        // freshly default-constructed locals. The old code copied
        // uninitialized entries and indexed past the end of the smaller
        // local vectors whenever inactive elements existed.
        element_g_nb_variables[j] = m_element_g_nb_variables[i];
        element_g_nb_equations[j] = m_element_g_nb_equations[i];
        element_g_variable_indices[j] = std::move(m_element_g_variable_indices[i]);
        element_g_equation_indices[j] = std::move(m_element_g_equation_indices[i]);
        elements_g[j] = std::move(m_elements_g[i]);
        j += 1;
    }
    m_max_element_n = max_element_n;
    m_max_element_m = max_element_m;
    m_element_g_nb_equations = std::move(element_g_nb_equations);
    m_element_g_nb_variables = std::move(element_g_nb_variables);
    m_element_g_equation_indices = std::move(element_g_equation_indices);
    m_element_g_variable_indices = std::move(element_g_variable_indices);
    m_elements_g = std::move(elements_g);
}
// Removes all inactive objective and constraint elements in one call.
void remove_inactive_elements()
{
remove_inactive_objectives();
remove_inactive_constraints();
}
public: // methods: model properties
// Linear solver used for factorizing/solving with hm (see hm_inv_v).
Pointer<LinearSolver> linear_solver() const noexcept
{
return m_linear_solver;
}
// Replaces the linear solver; rejects null.
void set_linear_solver(const Pointer<LinearSolver> value)
{
if (value == nullptr) {
throw std::invalid_argument("Value is null");
}
m_linear_solver = value;
}
// Number of threads used by compute(); 1 selects the sequential path.
int nb_threads() const noexcept
{
return m_nb_threads;
}
void set_nb_threads(const int value) noexcept
{
m_nb_threads = value;
}
// Chunk size for the OpenMP schedule(dynamic, m_grainsize) element loops.
int grainsize() const noexcept
{
return m_grainsize;
}
void set_grainsize(const int value) noexcept
{
m_grainsize = value;
}
// True if the problem has at least one equation (constraint).
bool is_constrained() const noexcept
{
return !m_equations.empty();
}
// Number of objective elements.
index nb_elements_f() const noexcept
{
return length(m_elements_f);
}
// Number of constraint elements.
index nb_elements_g() const noexcept
{
return length(m_elements_g);
}
const Equations& equations() const noexcept
{
return m_equations;
}
const Variables& variables() const noexcept
{
return m_variables;
}
index nb_equations() const noexcept
{
return length(m_equations);
}
index nb_variables() const noexcept
{
return length(m_variables);
}
// Bounds-checked access to the variable at the given index.
const Pointer<Variable>& variable(const index index) const
{
return m_variables.at(index);
}
// Index of the given variable, or -1 if it is not part of the problem.
index variable_index(const Pointer<Variable>& variable) const
{
const auto it = m_variable_indices.find(variable);
if (it == m_variable_indices.end()) {
return -1;
}
return it->second;
}
// Bounds-checked access to the equation at the given index.
const Pointer<Equation>& equation(const index index) const
{
return m_equations.at(index);
}
// Index of the given equation, or -1 if it is not part of the problem.
index equation_index(const Pointer<Equation>& equation) const
{
const auto it = m_equation_indices.find(equation);
if (it == m_equation_indices.end()) {
return -1;
}
return it->second;
}
public: // methods: input
// Returns the current variable values as a dense vector.
Vector x() const
{
Vector result(nb_variables());
for (index i = 0; i < length(result); i++) {
result(i) = variable(i)->value();
}
return result;
}
// Sets all variable values; throws if the size does not match nb_variables().
void set_x(Ref<const Vector> value) const
{
if (length(value) != nb_variables()) {
throw std::runtime_error("Invalid size");
}
for (index i = 0; i < length(value); i++) {
variable(i)->set_value(value[i]);
}
}
// Raw-pointer overload; `value` must point to at least nb_variables() doubles.
void set_x(double* const value) const
{
set_x(Map<const Vector>(value, nb_variables()));
}
// Adds delta to the variable values; throws on size mismatch.
void add_x(Ref<const Vector> delta) const
{
if (length(delta) != nb_variables()) {
throw std::runtime_error("Invalid size");
}
for (index i = 0; i < length(delta); i++) {
variable(i)->value() += delta[i];
}
}
void add_x(double* const delta) const
{
add_x(Map<const Vector>(delta, nb_variables()));
}
// Subtracts delta from the variable values; throws on size mismatch.
void sub_x(Ref<const Vector> delta) const
{
if (length(delta) != nb_variables()) {
throw std::runtime_error("Invalid size");
}
for (index i = 0; i < length(delta); i++) {
variable(i)->value() -= delta[i];
}
}
void sub_x(double* const delta) const
{
sub_x(Map<const Vector>(delta, nb_variables()));
}
// Returns the multipliers of all variables as a dense vector.
Vector variable_multipliers() const
{
Vector result(nb_variables());
for (index i = 0; i < length(result); i++) {
result(i) = variable(i)->multiplier();
}
return result;
}
// Sets all variable multipliers; throws on size mismatch.
void set_variable_multipliers(Ref<const Vector> value) const
{
if (length(value) != nb_variables()) {
throw std::runtime_error("Invalid size");
}
for (index i = 0; i < length(value); i++) {
variable(i)->set_multiplier(value[i]);
}
}
void set_variable_multipliers(double* const value) const
{
set_variable_multipliers(Map<const Vector>(value, nb_variables()));
}
// Returns the multipliers of all equations as a dense vector.
Vector equation_multipliers() const
{
Vector result(nb_equations());
for (index i = 0; i < length(result); i++) {
result(i) = equation(i)->multiplier();
}
return result;
}
// Sets all equation multipliers; throws on size mismatch.
void set_equation_multipliers(Ref<const Vector> value) const
{
if (length(value) != nb_equations()) {
throw std::runtime_error("Invalid size");
}
for (index i = 0; i < length(value); i++) {
equation(i)->set_multiplier(value[i]);
}
}
void set_equation_multipliers(double* const value) const
{
set_equation_multipliers(Map<const Vector>(value, nb_equations()));
}
// Scaling factor applied to the objective (f, df, hm) during compute().
double sigma() const noexcept
{
return m_sigma;
}
void set_sigma(const double value) noexcept
{
m_sigma = value;
}
// (lower, upper) bound pairs of all equations.
std::vector<std::pair<double, double>> equation_bounds() const
{
std::vector<std::pair<double, double>> bounds(nb_equations());
for (index i = 0; i < nb_equations(); i++) {
bounds[i] = {equation(i)->lower_bound(), equation(i)->upper_bound()};
}
return bounds;
}
// (lower, upper) bound pairs of all variables.
std::vector<std::pair<double, double>> variable_bounds() const
{
std::vector<std::pair<double, double>> bounds(nb_variables());
for (index i = 0; i < nb_variables(); i++) {
bounds[i] = {variable(i)->lower_bound(), variable(i)->upper_bound()};
}
return bounds;
}
public: // methods: output values
// View over the whole raw result buffer (f, g, df, dg, hm concatenated).
Ref<Vector> values() noexcept
{
return Map<Vector>(m_data.values().data(), m_data.values().size());
}
Ref<const Vector> values() const noexcept
{
return Map<const Vector>(m_data.values().data(), m_data.values().size());
}
public: // methods: output f
// Objective value of the last compute().
double f() const noexcept
{
return m_data.f();
}
void set_f(const double value) noexcept
{
m_data.f() = value;
}
public: // methods: output g
// Constraint values of the last compute().
Ref<Vector> g() noexcept
{
return m_data.g();
}
Ref<const Vector> g() const noexcept
{
return m_data.g();
}
double& g(const index index)
{
return m_data.g(index);
}
double g(const index index) const
{
return m_data.g(index);
}
public: // methods: output df
// Objective gradient of the last compute().
Ref<Vector> df() noexcept
{
return m_data.df();
}
Ref<const Vector> df() const noexcept
{
return m_data.df();
}
double& df(const index index)
{
return m_data.df(index);
}
double df(const index index) const
{
return m_data.df(index);
}
public: // methods: output dg
// Sparsity structure of the constraint jacobian.
auto structure_dg() const
{
return m_structure_dg;
}
// Constraint jacobian as an Eigen sparse view over the CSR buffers
// (nb_equations x nb_variables).
Ref<const Sparse> dg() const noexcept
{
return Map<const Sparse>(nb_equations(), nb_variables(), m_structure_dg.nb_nonzeros(), m_structure_dg.ia().data(), m_structure_dg.ja().data(), m_data.dg().data());
}
// Raw nonzero values / CSR index arrays of dg.
Ref<Vector> dg_values() noexcept
{
return m_data.dg();
}
Ref<const Vector> dg_values() const noexcept
{
return m_data.dg();
}
const std::vector<int>& dg_indptr() const noexcept
{
return m_structure_dg.ia();
}
const std::vector<int>& dg_indices() const noexcept
{
return m_structure_dg.ja();
}
// Access by flat nonzero index.
double& dg(const index index)
{
return m_data.dg_value(index);
}
double dg(const index index) const
{
return m_data.dg_value(index);
}
// Access by (row, col); the structure maps the pair to the flat index.
double& dg(const index row, const index col)
{
const index index = m_structure_dg.get_index(row, col);
return m_data.dg_value(index);
}
double dg(const index row, const index col) const
{
const index index = m_structure_dg.get_index(row, col);
return m_data.dg_value(index);
}
public: // methods: output hm
// Sparsity structure of the hessian of the lagrangian.
auto structure_hm() const
{
return m_structure_hm;
}
// Hessian as an Eigen sparse view over the CSR buffers.
Map<const Sparse> hm() const noexcept
{
return Map<const Sparse>(m_structure_hm.rows(), m_structure_hm.cols(), m_structure_hm.nb_nonzeros(), m_structure_hm.ia().data(), m_structure_hm.ja().data(), m_data.hm().data());
}
// Raw nonzero values / CSR index arrays of hm.
Ref<Vector> hm_values() noexcept
{
return m_data.hm();
}
Ref<const Vector> hm_values() const noexcept
{
return m_data.hm();
}
const std::vector<int>& hm_indptr() const noexcept
{
return m_structure_hm.ia();
}
const std::vector<int>& hm_indices() const noexcept
{
return m_structure_hm.ja();
}
// Access by flat nonzero index.
double& hm(const index index)
{
return m_data.hm_value(index);
}
double hm(const index index) const
{
return m_data.hm_value(index);
}
// Access by (row, col); the structure maps the pair to the flat index.
double& hm(const index row, const index col)
{
index index = m_structure_hm.get_index(row, col);
return m_data.hm_value(index);
}
double hm(const index row, const index col) const
{
index index = m_structure_hm.get_index(row, col);
return m_data.hm_value(index);
}
public: // methods: python
// Registers this class with pybind11 under the name "Problem".
// The [=] lambdas capture the scipy csr_matrix constructor by value so the
// sparse results can be handed to Python as scipy matrices.
// NOTE(review): the *_of methods combine py::gil_scoped_release with lambdas
// that call csr_matrix (a Python callable) -- confirm the GIL is reacquired
// before that call, otherwise this is unsafe.
template <typename TModule>
static void register_python(TModule& m)
{
namespace py = pybind11;
using namespace pybind11::literals;
using Holder = Pointer<Type>;
const std::string name = "Problem";
py::object scipy_sparse = py::module::import("scipy.sparse");
py::object csr_matrix = scipy_sparse.attr("csr_matrix");
py::class_<Type, Holder>(m, name.c_str())
// constructors
.def(py::init<ElementsF, ElementsG, int, int>(), "objective"_a = py::list(), "constraints"_a = py::list(),
"nb_threads"_a = 1, "grainsize"_a = 100, py::keep_alive<1, 2>(), py::keep_alive<1, 3>())
// read-only properties
.def_property_readonly("is_constrained", &Type::is_constrained)
.def_property_readonly("equations", &Type::equations)
.def_property_readonly("variables", &Type::variables)
.def_property_readonly("g", py::overload_cast<>(&Type::g))
.def_property_readonly("df", py::overload_cast<>(&Type::df))
.def_property_readonly("dg", [=](Type& self) {
return csr_matrix(
std::make_tuple(self.dg_values(), self.dg_indices(), self.dg_indptr()),
std::make_pair(self.nb_equations(), self.nb_variables()))
.release();
})
.def_property_readonly("structure_dg", &Type::structure_dg)
.def_property_readonly("dg_values", py::overload_cast<>(&Type::dg_values))
.def_property_readonly("dg_indptr", &Type::dg_indptr)
.def_property_readonly("dg_indices", &Type::dg_indices)
.def_property_readonly("hm", [=](Type& self) {
return csr_matrix(
std::make_tuple(self.hm_values(), self.hm_indices(), self.hm_indptr()),
std::make_pair(self.nb_variables(), self.nb_variables()))
.release();
})
.def_property_readonly("general_hm", [=](Type& self) {
// expand the upper-triangular storage to a full symmetric matrix
const auto [structure, values] = self.structure_hm().to_general(self.hm_values());
return csr_matrix(
std::make_tuple(values, structure.ja(), structure.ia()),
std::make_pair(self.nb_variables(), self.nb_variables()),
"copy"_a=true)
.release();
})
.def_property_readonly("structure_hm", &Type::structure_hm)
.def_property_readonly("hm_values", py::overload_cast<>(&Type::hm_values))
.def_property_readonly("hm_indptr", &Type::hm_indptr)
.def_property_readonly("hm_indices", &Type::hm_indices)
.def_property_readonly("hm_norm_inf", &Type::hm_norm_inf)
.def_property_readonly("nb_equations", &Type::nb_equations)
.def_property_readonly("nb_variables", &Type::nb_variables)
.def_property_readonly("values", py::overload_cast<>(&Type::values))
.def_property_readonly("equation_bounds", &Type::equation_bounds)
.def_property_readonly("variable_bounds", &Type::variable_bounds)
.def_property_readonly("nb_elements_f", &Type::nb_elements_f)
.def_property_readonly("nb_elements_g", &Type::nb_elements_g)
// properties
.def_property("linear_solver", &Type::linear_solver, &Type::set_linear_solver)
.def_property("f", &Type::f, &Type::set_f)
.def_property("nb_threads", &Type::nb_threads, &Type::set_nb_threads)
.def_property("grainsize", &Type::grainsize, &Type::set_grainsize)
.def_property("sigma", &Type::sigma, &Type::set_sigma)
.def_property("hm_diagonal", &Type::hm_diagonal, &Type::set_hm_diagonal)
.def_property("x", py::overload_cast<>(&Type::x, py::const_), py::overload_cast<Ref<const Vector>>(&Type::set_x, py::const_))
.def_property("variable_multipliers", py::overload_cast<>(&Type::variable_multipliers, py::const_), py::overload_cast<Ref<const Vector>>(&Type::set_variable_multipliers, py::const_))
.def_property("equation_multipliers", py::overload_cast<>(&Type::equation_multipliers, py::const_), py::overload_cast<Ref<const Vector>>(&Type::set_equation_multipliers, py::const_))
// methods
.def("add_x", py::overload_cast<Ref<const Vector>>(&Type::add_x, py::const_))
.def("sub_x", py::overload_cast<Ref<const Vector>>(&Type::sub_x, py::const_))
.def("variable_index", &Type::variable_index, "variable"_a)
.def("equation_index", &Type::equation_index, "equation"_a)
.def("clone", &Type::clone)
.def("remove_inactive_elements", &Type::remove_inactive_elements)
.def("compute", &Type::compute<true>, "order"_a = 2, py::call_guard<py::gil_scoped_release>())
.def("hm_add_diagonal", &Type::hm_add_diagonal, "value"_a)
.def("hm_inv_v", &Type::hm_inv_v, py::call_guard<py::gil_scoped_release>())
.def("hm_v", &Type::hm_v)
// f_of/g_of/df_of/dg_of/hm_of/hm_v_of: set x, recompute at the minimal
// required order, return the requested quantity
.def("f_of", [](Type& self, Ref<const Vector> x) {
self.set_x(x);
self.compute<false>(0);
return self.f();
},
"x"_a, py::call_guard<py::gil_scoped_release>())
.def("g_of", [](Type& self, Ref<const Vector> x) {
self.set_x(x);
self.compute<false>(0);
return Vector(self.g());
},
"x"_a, py::call_guard<py::gil_scoped_release>())
.def("df_of", [](Type& self, Ref<const Vector> x) {
self.set_x(x);
self.compute<false>(1);
return Vector(self.df());
},
"x"_a, py::call_guard<py::gil_scoped_release>())
.def("dg_of", [=](Type& self, Ref<const Vector> x) {
self.set_x(x);
self.compute<false>(1);
return csr_matrix(
std::make_tuple(self.dg_values(), self.dg_indices(), self.dg_indptr()),
std::make_pair(self.nb_equations(), self.nb_variables()))
.release();
},
"x"_a, py::call_guard<py::gil_scoped_release>())
.def("hm_of", [=](Type& self, Ref<const Vector> x) {
self.set_x(x);
self.compute<false>(2);
return csr_matrix(
std::make_tuple(self.hm_values(), self.hm_indices(), self.hm_indptr()),
std::make_pair(self.nb_variables(), self.nb_variables()))
.release();
},
"x"_a, py::call_guard<py::gil_scoped_release>())
.def("hm_v_of", [=](Type& self, Ref<const Vector> x, Ref<const Vector> p) {
self.set_x(x);
self.compute<false>(2);
return self.hm_v(p);
},
"x"_a, "p"_a, py::call_guard<py::gil_scoped_release>())
.def("scale", &Type::scale, "factor"_a);
}
};
} // namespace eqlib
|
deconvolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 stride-1 deconvolution (transposed convolution): each input pixel
// scatters val * kernel into a 3x3 window of the output, accumulated over all
// input channels. top_blob must be pre-sized; bias (or 0) initializes it.
// Output channels are processed in parallel via OpenMP.
static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
// accumulate the contribution of every input channel
for (int q = 0; q < inch; q++)
{
const float* img0 = bottom_blob.channel(q);
// kernel layout: 9 weights per (outch, inch) pair; k0/k1/k2 are the
// three kernel rows
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
// NOTE: vld1q_f32 reads 4 floats, so one lane past each 3-element
// kernel row is loaded but never used via the lane selectors below
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
// input row i scatters into output rows i, i+1, i+2
float* outptr = out.row(i);
float* outptr0 = outptr;
float* outptr1 = outptr + outw;
float* outptr2 = outptr + outw * 2;
int j = 0;
#if __ARM_NEON
// vectorized: 4 input pixels per iteration; each overlapping
// load/store at offsets +0/+1/+2 applies one kernel column
for (; j + 3 < w; j += 4)
{
float32x4_t _v = vld1q_f32(r0);
#if 0 // bad compiler generate slow instructions :( \
// 0
float32x4_t _out00 = vld1q_f32(outptr0 + 0);
_out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1);
// ext
float32x4_t _zero_out01 = vdupq_n_f32(0.f);
_zero_out01 = vextq_f32(_zero_out01, _out01, 3);
_out00 = vaddq_f32(_out00, _zero_out01);
//
float32x2_t _out00low = vget_low_f32(_out00);
float32x2_t _out00high = vget_high_f32(_out00);
_out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0);
_out00 = vcombine_f32(_out00low, _out00high);
vst1q_f32(outptr0 + 0, _out00);
//
float32x2_t _out02high = vld1_f32(outptr0 + 4);
float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1);
_out02high = vadd_f32(_out02high, _out01_zero);
_out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0);
vst1_f32(outptr0 + 4, _out02high);
// 1
float32x4_t _out10 = vld1q_f32(outptr1 + 0);
_out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1);
// ext
float32x4_t _zero_out11 = vdupq_n_f32(0.f);
_zero_out11 = vextq_f32(_zero_out11, _out11, 3);
_out10 = vaddq_f32(_out10, _zero_out11);
//
float32x2_t _out10low = vget_low_f32(_out10);
float32x2_t _out10high = vget_high_f32(_out10);
_out10high = vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0);
_out10 = vcombine_f32(_out10low, _out10high);
vst1q_f32(outptr1 + 0, _out10);
//
float32x2_t _out12high = vld1_f32(outptr1 + 4);
float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1);
_out12high = vadd_f32(_out12high, _out11_zero);
_out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), vget_high_f32(_k1), 0);
vst1_f32(outptr1 + 4, _out12high);
// 2
float32x4_t _out20 = vld1q_f32(outptr2 + 0);
_out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1);
// ext
float32x4_t _zero_out21 = vdupq_n_f32(0.f);
_zero_out21 = vextq_f32(_zero_out21, _out21, 3);
_out20 = vaddq_f32(_out20, _zero_out21);
//
float32x2_t _out20low = vget_low_f32(_out20);
float32x2_t _out20high = vget_high_f32(_out20);
_out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0);
_out20 = vcombine_f32(_out20low, _out20high);
vst1q_f32(outptr2 + 0, _out20);
//
float32x2_t _out22high = vld1_f32(outptr2 + 4);
float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1);
_out22high = vadd_f32(_out22high, _out21_zero);
_out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0);
vst1_f32(outptr2 + 4, _out22high);
#else
//
float32x4_t _out00 = vld1q_f32(outptr0 + 0);
_out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
vst1q_f32(outptr0 + 0, _out00);
float32x4_t _out01 = vld1q_f32(outptr0 + 1);
_out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
vst1q_f32(outptr0 + 1, _out01);
float32x4_t _out02 = vld1q_f32(outptr0 + 2);
_out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
vst1q_f32(outptr0 + 2, _out02);
//
float32x4_t _out10 = vld1q_f32(outptr1 + 0);
_out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
vst1q_f32(outptr1 + 0, _out10);
float32x4_t _out11 = vld1q_f32(outptr1 + 1);
_out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
vst1q_f32(outptr1 + 1, _out11);
float32x4_t _out12 = vld1q_f32(outptr1 + 2);
_out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
vst1q_f32(outptr1 + 2, _out12);
//
float32x4_t _out20 = vld1q_f32(outptr2 + 0);
_out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
vst1q_f32(outptr2 + 0, _out20);
float32x4_t _out21 = vld1q_f32(outptr2 + 1);
_out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
vst1q_f32(outptr2 + 1, _out21);
float32x4_t _out22 = vld1q_f32(outptr2 + 2);
_out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
vst1q_f32(outptr2 + 2, _out22);
#endif
r0 += 4;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
}
#endif // __ARM_NEON
// scalar tail (and the full loop when NEON is unavailable):
// scatter one input pixel into the 3x3 output window
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
r0++;
outptr0++;
outptr1++;
outptr2++;
}
}
}
}
}
// 3x3 stride-2 deconvolution (transposed convolution): input pixel (i, j)
// scatters val * kernel into the 3x3 output window at (2*i, 2*j), accumulated
// over all input channels. top_blob must be pre-sized; bias (or 0)
// initializes it. Output channels are processed in parallel via OpenMP.
static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
// accumulate the contribution of every input channel
for (int q = 0; q < inch; q++)
{
const float* img0 = bottom_blob.channel(q);
// kernel layout: 9 weights per (outch, inch) pair; k0/k1/k2 are the
// three kernel rows
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
// vld1q_f32 reads one lane past each 3-element kernel row; the extra
// lane is never selected below
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
// stride 2: input row i scatters into output rows 2i, 2i+1, 2i+2
float* outptr = out.row(i * 2);
float* outptr0 = outptr;
float* outptr1 = outptr0 + outw;
float* outptr2 = outptr1 + outw;
int j = 0;
#if __ARM_NEON
// vectorized: 4 input pixels -> 8 output columns; vld2q/vst2q
// de-interleave even/odd output columns so each kernel column
// accumulates into its stride-2 lane set
for (; j + 3 < w; j += 4)
{
float32x4_t _v = vld1q_f32(r0);
// out row 0
float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6
float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7
float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8
float32x4x2_t _out0 = vld2q_f32(outptr0);
_out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6
_out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7
vst2q_f32(outptr0, _out0);
_out0 = vld2q_f32(outptr0 + 2);
_out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8
vst2q_f32(outptr0 + 2, _out0);
// out row 1
float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6
float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7
float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8
float32x4x2_t _out1 = vld2q_f32(outptr1);
_out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6
_out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7
vst2q_f32(outptr1, _out1);
_out1 = vld2q_f32(outptr1 + 2);
_out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8
vst2q_f32(outptr1 + 2, _out1);
// out row 2
float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6
float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7
float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8
float32x4x2_t _out2 = vld2q_f32(outptr2);
_out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6
_out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7
vst2q_f32(outptr2, _out2);
_out2 = vld2q_f32(outptr2 + 2);
_out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8
vst2q_f32(outptr2 + 2, _out2);
r0 += 4;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
}
#endif // __ARM_NEON
// scalar tail (and the full loop when NEON is unavailable)
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
r0++;
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
}
}
}
}
}
|
tree.h | /*
* tree.h
*
* Created on: Oct 9, 2016
* Author: Yimin Zhong
*/
#ifndef FMM_TREE_H
#define FMM_TREE_H
#include "node.h"
#include "measure.h"
#include <chrono>
#include <fstream>
#include <cassert>
#include <queue>
#ifdef RUN_OMP
#include "omp.h"
#endif
// Octree over source/target point clouds, presumably for a fast multipole
// method (the u/v/w/x interaction lists are built in buildNode).
class tree {
public:
// flat storage of all nodes; children are referenced by index into dict
vector<node> dict;
// index of the most recently created node (-1 while empty)
int maxId;
// index of the root node (-1 while empty)
int root;
// number of source / target points
int nSource;
int nTarget;
// maximum number of sources per leaf before a node is subdivided
int rank;
// deepest level reached during subdivision
int maxLevel;
// copies of the input point clouds, indexed by the nodes' index lists
vector<point> sourceTree;
vector<point> targetTree;
// center and half-extents of the root bounding box
point center;
point radius;
tree() {
maxId = -1;
root = -1;
nSource = 0;
nTarget = 0;
rank = 0;
maxLevel = 0;
}
~tree() {
}
// builds the tree: bounding box, recursive subdivision, interaction lists
void populate(vector<point>& _source, vector<point>& _target, int _nSource, int _nTarget, int _rank, int _maxLevel);
void output(std::string file);
protected:
void getCenterRadius(vector<point>& _source);
void assignChildren(int _id, int _maxLevel);
void buildTree();
void buildNode(int _id, point& min_p, point& max_p);
int findNode(int _id, point& p);
bool isAdjacent(int _aId, int _bId);
};
// Builds the whole tree: stores the point clouds, computes the root bounding
// box, creates the root node owning all points, then recursively subdivides
// and builds the interaction lists. RUN(...) presumably times the wrapped
// call (macro from measure.h) -- TODO confirm.
void tree::populate(vector<point> &_source, vector<point> &_target, int _nSource, int _nTarget, int _rank,
int _maxLevel) {
this->sourceTree = _source;
this->targetTree = _target;
this->nSource = _nSource;
this->nTarget = _nTarget;
this->maxLevel = 0;
this->rank = _rank;
getCenterRadius(_source);
this->root = 0;
this->dict.push_back(node(0, 0));
this->maxId = root;
// the root owns every source and target point
dict[root].nSource = nSource;
dict[root].nTarget = nTarget;
dict[root].center = center;
dict[root].radius = radius;
dict[root].sourceIndex.resize((unsigned long) nSource);
dict[root].targetIndex.resize((unsigned long) nTarget);
for (int i = 0; i < nSource; ++i) {
dict[root].sourceIndex[i] = i;
}
for (int i = 0; i < nTarget; ++i) {
dict[root].targetIndex[i] = i;
}
RUN("initialization", assignChildren(root, _maxLevel));
RUN("assign lists" , buildTree());
}
// Computes the axis-aligned bounding box of _source and stores its midpoint
// in `center` and its half-extents in `radius`. Requires a non-empty input.
void tree::getCenterRadius(vector<point> &_source) {
    assert(_source.size() > 0);
    double lo_x = _source[0].x, hi_x = _source[0].x;
    double lo_y = _source[0].y, hi_y = _source[0].y;
    double lo_z = _source[0].z, hi_z = _source[0].z;
    for (const auto& p : _source) {
        lo_x = std::min(lo_x, p.x);
        hi_x = std::max(hi_x, p.x);
        lo_y = std::min(lo_y, p.y);
        hi_y = std::max(hi_y, p.y);
        lo_z = std::min(lo_z, p.z);
        hi_z = std::max(hi_z, p.z);
    }
    this->center.x = (hi_x + lo_x)/2.0;
    this->center.y = (hi_y + lo_y)/2.0;
    this->center.z = (hi_z + lo_z)/2.0;
    this->radius.x = (hi_x - lo_x)/2.0;
    this->radius.y = (hi_y - lo_y)/2.0;
    this->radius.z = (hi_z - lo_z)/2.0;
}
// Recursively subdivides node _id into 8 octants until a node holds at most
// `rank` sources or reaches _maxLevel, distributing the node's source and
// target indices among the children. Updates this->maxLevel with the deepest
// leaf level. NOTE(review): dict is always accessed via dict[...] indexing,
// never via a cached reference -- push_back may reallocate the vector, which
// would invalidate such a reference.
void tree::assignChildren(int _id, int _maxLevel) {
/*
* when assigning children nodes, the points are not assigned due to storage.
*
* Now the limitation of nodes is around 2^24.
*/
assert(root != -1); // check tree is non-empty
// check source
if (dict[_id].nSource == 0) {
// empty node: terminate as an empty leaf
dict[_id].isLeaf = true;
dict[_id].isEmpty = true;
}
else {
// divide
if ((dict[_id].nSource <= rank) || (dict[_id].nLevel == _maxLevel)) {
dict[_id].isLeaf = true;
if (maxLevel < dict[_id].nLevel) {
maxLevel = dict[_id].nLevel;
}
}
else {
// not a leaf
// create the 8 children; octant i encodes x in bit 0, y in bit 1,
// z in bit 2, offsetting the child center by +/- radius/2 per axis
for (int i = 0; i < 8; ++i) {
maxId += 1;
dict[_id].child[i] = maxId;
dict.push_back(node(dict[_id].nLevel + 1, i));
dict[maxId].parent = _id;
dict[maxId].center.x = dict[_id].center.x + ((i & 1) - 0.5) * dict[_id].radius.x;
dict[maxId].center.y = dict[_id].center.y + (((i >> 1) & 1) - 0.5) * dict[_id].radius.y;
dict[maxId].center.z = dict[_id].center.z + ((i >> 2) - 0.5) * dict[_id].radius.z;
dict[maxId].radius.x = dict[_id].radius.x * 0.5;
dict[maxId].radius.y = dict[_id].radius.y * 0.5;
dict[maxId].radius.z = dict[_id].radius.z * 0.5;
dict[maxId].nSource = 0;
dict[maxId].nTarget = 0;
}
/*
* can be accelerated by **reduce**
*/
// route each source point to the octant matching its position
// relative to the parent center
for (int i = 0; i < dict[_id].nSource; ++i) {
int index = dict[_id].sourceIndex[i];
int z_bit = sourceTree[index].z < dict[_id].center.z ? 0:1;
int y_bit = sourceTree[index].y < dict[_id].center.y ? 0:1;
int x_bit = sourceTree[index].x < dict[_id].center.x ? 0:1;
int childIndex = 4 * z_bit + 2 * y_bit + x_bit;
int childId = dict[_id].child[childIndex];
dict[childId].sourceIndex.push_back(index);
dict[childId].nSource += 1;
}
/*
* can be accelerated by **reduce**
*/
// route each target point the same way
for (int i = 0; i < dict[_id].nTarget; ++i) {
int index = dict[_id].targetIndex[i];
int z_bit = targetTree[index].z < dict[_id].center.z ? 0:1;
int y_bit = targetTree[index].y < dict[_id].center.y ? 0:1;
int x_bit = targetTree[index].x < dict[_id].center.x ? 0:1;
int childIndex = 4 * z_bit + 2 * y_bit + x_bit;
int childId = dict[_id].child[childIndex];
dict[childId].targetIndex.push_back(index);
dict[childId].nTarget += 1;
}
// recurse into the children
for (int i = 0; i < 8; ++i) {
assignChildren(dict[_id].child[i], _maxLevel);
}
}
}
}
// Builds the interaction lists of every node, optionally in parallel.
// min_p/max_p are the corners of the root bounding box, used by buildNode to
// reject candidate neighbor positions outside the domain.
// NOTE(review): the loop variable is size_t; an unsigned OpenMP loop index
// requires OpenMP 3.0+ -- confirm the toolchains used with RUN_OMP.
void tree::buildTree() {
point min_p(dict[root].center.x - dict[root].radius.x,
dict[root].center.y - dict[root].radius.y,
dict[root].center.z - dict[root].radius.z);
point max_p(dict[root].center.x + dict[root].radius.x,
dict[root].center.y + dict[root].radius.y,
dict[root].center.z + dict[root].radius.z);
size_t i;
#ifdef RUN_OMP
#pragma omp parallel for private(i) shared(min_p, max_p) schedule(dynamic)
#endif
for (i = 0; i < dict.size(); ++i) {
buildNode(i, min_p, max_p);
}
}
// Build the interaction lists for node _id following the classic FMM
// scheme.  Candidate neighbour centers are enumerated on a 6x6x6 grid of
// same-size boxes around the parent; each candidate that falls inside the
// global bounding box [min_p, max_p] is located in the tree and classified:
//   uList - adjacent leaves (near field),
//   vList - same-level, non-adjacent nodes (interaction list),
//   wList - non-adjacent descendants of adjacent same-level neighbours,
//   xList - coarser-level, non-adjacent nodes.
// The cached list-size counters (nUList etc.) are refreshed at the end.
void tree::buildNode(int _id, point &min_p, point &max_p) {
node& n = dict[_id];
// Rebuilding from scratch: drop any lists from a previous pass.
n.uList.clear();
n.vList.clear();
n.wList.clear();
n.xList.clear();
// not root
if (n.parent != -1) {
node& pn = dict[n.parent];
double dx = n.radius.x;
double dy = n.radius.y;
double dz = n.radius.z;
// Corner of the parent's box shifted by one child radius; stepping by
// 2*radius then walks over centers of boxes the same size as this node.
double xs = pn.center.x - dx;
double ys = pn.center.y - dy;
double zs = pn.center.z - dz;
point cur;
for (int x_id = -2; x_id < 4; x_id++) {
for (int y_id = -2; y_id < 4; y_id++) {
for (int z_id = -2; z_id < 4; z_id++) {
cur.x = xs + 2 * x_id * dx;
cur.y = ys + 2 * y_id * dy;
cur.z = zs + 2 * z_id * dz;
// check box and not itself.
if (cur <= max_p && cur >= min_p && !(cur == n.center)) {
//find node.
// findNode descends from the root and may stop at a coarser leaf, so
// curNode.nLevel can be smaller than n.nLevel.
int curId = findNode(0, cur);
bool adj = isAdjacent(_id, curId);
node& curNode = dict[curId];
if (curNode.nLevel < n.nLevel) {
if (adj) {
if (curNode.isLeaf) {
n.uList.insert(curId);
}
}
else {
n.xList.insert(curId);
}
}
if (curNode.nLevel == n.nLevel) {
if (!adj) {
n.vList.insert(curId);
}
else {
if (n.isLeaf) {
// Breadth-first descent into the adjacent same-level neighbour:
// non-adjacent descendants go to wList, adjacent leaves to uList,
// adjacent internal nodes are expanded further.
std::queue<int> rest;
rest.push(curId);
while (!rest.empty()) {
int frontId = rest.front(); rest.pop();
node& frontNode = dict[frontId];
if (!isAdjacent(frontId, _id)) {
n.wList.insert(frontId);
}
else {
if (frontNode.isLeaf) {
n.uList.insert(frontId);
}
else {
for (int i = 0; i < 8; ++i) {
rest.push(frontNode.child[i]);
}
}
}
}
}
}
}
}
}
}
}
}
// A leaf is part of its own near field.
if (n.isLeaf) {
n.uList.insert(_id);
}
// Cache the list sizes for later output/traversal.
n.nUList = (int) n.uList.size();
n.nWList = (int) n.wList.size();
n.nVList = (int) n.vList.size();
n.nXList = (int) n.xList.size();
}
// Descend from node _id towards the point p, following at each internal
// node the child octant that contains p, and return the id of the first
// node whose center equals p or that is a leaf.
int tree::findNode(int _id, point &p) {
    int cur = _id;
    for (;;) {
        node& n = dict[cur];
        if (n.center == p || n.isLeaf) {
            return cur;
        }
        // Octant selection: bit is 1 when p lies at or above the center
        // along that axis (matches the child layout used by assignChildren).
        int xb = (p.x >= n.center.x) ? 1 : 0;
        int yb = (p.y >= n.center.y) ? 1 : 0;
        int zb = (p.z >= n.center.z) ? 1 : 0;
        cur = n.child[4 * zb + 2 * yb + xb];
    }
}
bool tree::isAdjacent(int _aId, int _bId) {
node& nA = dict[_aId];
node& nB = dict[_bId];
double diff_x = fabs(nA.center.x - nB.center.x);
double diff_y = fabs(nA.center.y - nB.center.y);
double diff_z = fabs(nA.center.z - nB.center.z);
double r_x = fabs(nA.radius.x + nB.radius.x);
double r_y = fabs(nA.radius.y + nB.radius.y);
double r_z = fabs(nA.radius.z + nB.radius.z);
bool rdx = r_x >= diff_x - __eps;
bool rdy = r_y >= diff_y - __eps;
bool rdz = r_z >= diff_z - __eps;
bool x_adj = (fabs(diff_x - r_x) < __eps) && (rdy && rdz);
bool y_adj = (fabs(diff_y - r_y) < __eps) && (rdx && rdz);
bool z_adj = (fabs(diff_z - r_z) < __eps) && (rdy && rdx);
return x_adj || y_adj || z_adj;
}
// Dump every node (center, radius, and the four interaction-list sizes)
// to the given file, one node per line; print an error message to stdout
// if the file cannot be opened.
void tree::output(std::string file) {
    std::ofstream out(file);
    if (!out.is_open()) {
        std::cout << "cannot open file: " << file << std::endl;
        return;
    }
    for (size_t i = 0; i < dict.size(); ++i) {
        out << dict[i].center.x << " "
            << dict[i].center.y << " "
            << dict[i].center.z << " "
            << dict[i].radius.x << " "
            << dict[i].radius.y << " "
            << dict[i].radius.z << " "
            << dict[i].nVList << " " << dict[i].nXList << " " << dict[i].nUList << " " << dict[i].nWList << "\n";
    }
    out.close();
}
#endif //FMM_TREE_H
|
ExceptionPropagator.h | #if !defined EXCEPTION_PROPAGATOR_H
#define EXCEPTION_PROPAGATOR_H
// Experimental class to propagate exceptions around OpenMP pragmas (and other
// constructs) that cannot cope with exceptions. Exceptions thrown inside a lambda
// function that is given to the () operator are propagated and rethrown when the
// progagation object is destroyed. Example use case:
//
// ExceptionPropagator ep;
//
// #pragma omp parallel for
// for (int i = 0; i < 10000; i ++)
// if (!ep) // finish loop ASAP if exception is pending
// ep([&] () {
// throw 42; // continue with code that might throw exceptions
// });
//
// // exception is rethrown at scope exit of ep
#include <atomic>
#include <exception>
// Propagates the first exception thrown inside a functor handed to
// operator () across constructs (e.g. OpenMP regions) that cannot let
// exceptions escape.  The captured exception is rethrown when the
// propagator is destroyed.
//
// Fix: operator bool() and the destructor previously read the non-atomic
// std::exception_ptr directly while other threads could still be writing
// it inside operator () — a data race in exactly the documented OpenMP
// polling pattern (`if (!ep) ...`).  The pending state is now published
// through a std::atomic<bool> with release/acquire ordering.
class ExceptionPropagator
{
  public:
    ExceptionPropagator()
    :
      propagateException(ATOMIC_FLAG_INIT)
    {
    }

    // Rethrows a captured exception at scope exit, unless stack
    // unwinding from another exception is already in progress.
    ~ExceptionPropagator() noexcept(false)
    {
      if (exceptionPending.load(std::memory_order_acquire) && !std::uncaught_exception())
        std::rethrow_exception(exception);
    }

    // Runs func(), capturing the first exception thrown by any thread.
    // The atomic_flag guarantees only one thread ever writes 'exception';
    // the release store on 'exceptionPending' publishes that write to
    // readers of operator bool() and the destructor.
    template <typename T> void operator () (const T &func)
    {
      try {
        func();
      } catch (...) {
        if (!atomic_flag_test_and_set(&propagateException)) {
          exception = std::current_exception();
          exceptionPending.store(true, std::memory_order_release);
        }
      }
    }

    operator bool () const // returns true iff exception pending
    {
      return exceptionPending.load(std::memory_order_acquire);
    }

  private:
    std::atomic_flag propagateException;   // claims the single write slot
    std::atomic<bool> exceptionPending{false}; // race-free "pending" signal
    std::exception_ptr exception;          // written once, before the release store
};
#endif
|
residualbased_newton_raphson_mpc_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Contact criteria
#include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "utilities/atomic_utilities.h"
// // Processes
// #include "processes/fast_transfer_between_model_parts_process.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonMPCContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonMPCContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy );
// Aliases of the base strategy / criteria types used throughout this class.
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef MPCContactCriteria<TSparseSpace, TDenseSpace> TMPCContactCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType;
typedef std::size_t IndexType;
typedef std::size_t SizeType;
/**
 * @brief Default constructor
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
ResidualBasedNewtonRaphsonMPCContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters)
{
KRATOS_TRY;
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
// Validate the user-provided parameters against the defaults.
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
 * @brief Default constructor
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
ResidualBasedNewtonRaphsonMPCContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
mThisParameters(ThisParameters)
{
KRATOS_TRY;
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
// Validate the user-provided parameters against the defaults.
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
 * @brief Default constructor
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
ResidualBasedNewtonRaphsonMPCContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
mThisParameters(ThisParameters)
{
KRATOS_TRY;
// We create the contact criteria
mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();
// Validate the user-provided parameters against the defaults.
Parameters default_parameters = GetDefaultParameters();
mThisParameters.ValidateAndAssignDefaults(default_parameters);
KRATOS_CATCH("");
}
/**
 * Destructor.
 */
~ResidualBasedNewtonRaphsonMPCContactStrategy() override
= default;
//******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
//***********************************************************************************//
/**
 * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
 * values of the solution step of interest are assumed equal to the old values
 * @details After the base prediction, one build-and-solve is performed so the
 * MPC contact criteria can update the active set once before iterating.
 */
void Predict() override
{
KRATOS_TRY
BaseType::Predict();
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
// We get the system
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// We solve the system in order to check the active set once
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);
// Check active set (silence the convergence criteria while doing so)
const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria);
KRATOS_CATCH("")
}
/**
 * @brief Initialization of member variables and prior operations
 */
void Initialize() override
{
KRATOS_TRY;
// Computing nodal weights
ComputeNodalWeights();
BaseType::Initialize();
KRATOS_CATCH("");
}
/**
 * @brief The problem of interest is solved.
 * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
 * SolveSolutionStep() and FinalizeSolutionStep().
 * All those functions can otherwise be called separately.
 * @return Always 0.0 (the residual norm is not reported here).
 */
double Solve() override
{
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
this->SolveSolutionStep();
this->FinalizeSolutionStep(); // TODO: Comment for proper work of interaction
return 0.0;
}
/**
 * @brief Performs all the required operations that should be done (for each step)
 * before solving the solution step.
 * @details A member variable should be used as a flag to make sure this function is called only once per step.
 */
void InitializeSolutionStep() override
{
// Computing nodal weights
ComputeNodalWeights();
BaseType::InitializeSolutionStep();
// // If enforcing NTN
// const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
// if (enforce_ntn) {
//     EnforcingNTN();
// }
}
/**
 * @brief Performs all the required operations that should be done (for each step)
 * after solving the solution step.
 */
void FinalizeSolutionStep() override
{
KRATOS_TRY;
BaseType::FinalizeSolutionStep();
KRATOS_CATCH("");
}
/**
 * @brief Solves the current step.
 * @details This function returns true if a solution has been found, false otherwise.
 * When the process info has the INTERACTION flag set, an outer loop of up to
 * "inner_loop_iterations" passes is run; each pass solves a full Newton cycle
 * and then lets the MPC contact criteria re-check the active set.
 */
bool SolveSolutionStep() override
{
KRATOS_TRY;
bool is_converged = false;
// Getting model part
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
// We get the process info
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
if (r_process_info.Is(INTERACTION)) {
// We get the system
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
int inner_iteration = 0;
const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
++inner_iteration;
if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl;
}
// We solve one loop
r_process_info[NL_ITERATION_NUMBER] = 1;
is_converged = AuxiliarSolveSolutionStep();
// We check the convergence
if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check
is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
}
}
} else {
is_converged = AuxiliarSolveSolutionStep();
}
return is_converged;
KRATOS_CATCH("");
}
/**
 * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. (auxiliar method)
 * @details Full Newton-Raphson cycle: initial build-and-solve, then iterate
 * until the convergence criteria are met or mMaxIterationNumber is reached.
 */
bool AuxiliarSolveSolutionStep()
{
// Getting flag INTERACTION
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool();
VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions());
// Pointers needed in the solution
typename TSchemeType::Pointer p_scheme = this->GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver();
auto& r_dof_set = p_builder_and_solver->GetDofSet();
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// Initializing the parameters of the Newton-Raphson cycle
unsigned int iteration_number = 1;
r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
bool is_converged = false;
bool residual_is_updated = false;
// Computing nodal weights
ComputeNodalWeights();
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
// // If enforcing NTN
// const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
// if (enforce_ntn) {
//     EnforcingNTN();
// }
// Function to perform the building and the solving phase.
if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx); //Dx=0.00;
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
}
// Iteration Cycle... performed only for NonLinearProblems
while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) {
// Setting the number of iteration
r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
// Computing nodal weights
ComputeNodalWeights();
// Calling InitializeNonLinIteration
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
// Shaping correctly the system
if (update_each_nl_iteration) {
p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
p_builder_and_solver->SetUpSystem(r_model_part);
p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part);
}
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
// Call the linear system solver to find the correction mDx for the it is not called if there is no system to solve
if (SparseSpaceType::Size(rDx) != 0) {
if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) {
if (!BaseType::GetKeepSystemConstantDuringIterations()) {
//A = 0.00;
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
residual_is_updated = false;
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
residual_is_updated = true;
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
}
}
// Plots a warning if the maximum number of iterations is exceeded
if (iteration_number >= BaseType::mMaxIterationNumber) {
BaseType::MaxIterationsExceeded();
} else {
KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl;
}
// Recalculate residual if needed (note that some convergence criteria need it to be recalculated)
if (!residual_is_updated) {
// NOTE:
// The following part will be commented because it is time consuming
// and there is no obvious reason to be here. If someone need this
// part please notify the community via mailing list before uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
}
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
return is_converged;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria
///@}
///@name Protected Operators
///@{
/**
 * @brief This method returns the default parameters in order to avoid code duplication
 * @return Returns the default parameters
 */
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
    "inner_loop_iterations"    : 5,
    "update_each_nl_iteration" : false,
    "enforce_ntn"              : false
})" );
return default_parameters;
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@{
/**
 * Copy constructor.
 * NOTE(review): the body is empty, so members are default-initialized
 * rather than copied — confirm copying is intentionally disabled.
 */
ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other)
{
};
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// /**
//  * @brief This inforces NTN formulation
//  */
// void EnforcingNTN()
// {
//     // List of enforced nodes to not repeat
//     std::unordered_set<IndexType> enforced_nodes;
//
//     // Getting contact model part
//     ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart();
//     ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact");
//
//     // The process info
//     const auto& r_process_info = r_root_model_part.GetProcessInfo();
//
//     // Reset the pointers of the conditions
//     for (auto& r_cond : r_computing_contact_model_part.Conditions()) {
//         if (r_cond.Has(CONSTRAINT_POINTER)) {
//             r_cond.SetValue(CONSTRAINT_POINTER, nullptr);
//         }
//     }
//
//     // Iterate over the constraints
//     IndexType counter = 1;
//     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
//         r_const.SetId(counter);
//         ++counter;
//     }
//
//     // Auxiliar classes
//     Matrix original_relation_matrix, relation_matrix;
//     Vector original_constant_vector, constant_vector;
//     ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs;
//
//     // Iterate over the constraints
//     for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) {
//         // Getting original system
//         r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info);
//         r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info);
//
//         // TODO: Finish rebuild
//
//         // Creating new constraint
//         r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, master_dofs, slave_dofs, relation_matrix, constant_vector);
//
//         // Setting to remove the old constraints
//         r_const.Set(TO_ERASE, true);
//
//         ++counter;
//     }
//
//     // Remove old constraints
//     r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
//
//     // Transfer constraints from the root to the computing model part
//     FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute();
//
//     // Reorder ids
//     counter = 1;
//     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
//         r_const.SetId(counter);
//         ++counter;
//     }
// }
/**
 * @brief This computes the nodal weights
 * @details For every node of slave conditions in the "Contact" sub model part:
 * NODAL_PAUX accumulates 1.0 per slave condition touching the node, and
 * NODAL_MAUX accumulates lumping_factor * condition domain size. Both are
 * zeroed first, so values are rebuilt on every call.
 */
void ComputeNodalWeights()
{
// Getting contact model part
ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact");
// Reset the NODAL_PAUX and NODAL_MAUX
auto& r_nodes_array = r_contact_model_part.Nodes();
VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array);
VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array);
// We set the constraints active and inactive in function of the active set
auto& r_conditions_array = r_contact_model_part.Conditions();
auto it_cond_begin = r_conditions_array.begin();
// If enforcing NTN
const bool enforce_ntn = false;
// const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
// if (enforce_ntn) {
//     VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array);
// }
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
auto it_cond = it_cond_begin + i;
// Only slave conditions
if (it_cond->Is(SLAVE)) {
auto& r_geometry = it_cond->GetGeometry();
Vector lumping_factor;
lumping_factor = r_geometry.LumpingFactors(lumping_factor);
const double domain_size = r_geometry.DomainSize();
for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) {
auto& r_node = r_geometry[i_node];
if (!enforce_ntn) {
// AtomicAdd: several conditions may share a node under OpenMP.
AtomicAdd(r_node.GetValue(NODAL_PAUX), 1.0);
}
AtomicAdd(r_node.GetValue(NODAL_MAUX), lumping_factor[i_node] * domain_size);
}
}
}
}
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two struct timeval values.
 *
 * Note: *y is used as scratch space and is modified in place (the
 * carry/borrow is normalized into it), matching the classic GNU libc
 * example this function is derived from.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry whole seconds out of y when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization, result->tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's (adjusted) seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for an order-8 (25-point) 3-D wave-propagation stencil, time-tiled
 * by a polyhedral source-to-source tool (CLooG-generated loop nest).
 * Usage: prog Nx Ny Nz Nt  (interior sizes; a halo of 4 is added per side).
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read without checking argc and used uninitialized
 *    otherwise (undefined behavior); now all four arguments are required.
 *  - roc2 was malloc'd twice, leaking the first allocation.
 *  - the outer A pointer and tile_size were never freed.
 */
int main(int argc, char *argv[])
{
    int i, j, k, test;
    int Nx, Ny, Nz, Nt;
    if (argc < 5) {
        fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1])+8;   /* +8: halo of 4 points on each face */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
    Nt = atoi(argv[4]);
    /* A holds two time levels of the field (ping-pong on A[t%2]);
       roc2 is the per-point material coefficient. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    /* single allocation here; the original malloc'd roc2 twice and leaked */
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    /* tile size information, including extra element (-1) as list terminator */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    /* The list is modified here before source-to-source transformations */
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 24;
    tile_size[3] = 32;
    tile_size[4] = -1;
    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    /* initialize with reproducible pseudo-random data.
       NOTE(review): loops start at 1, so plane/row/column 0 of A[0] and
       all of A[1] stay uninitialized; the stencil may read such points on
       the first sweep. Kept as in the original benchmark — confirm intent. */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* 8th-order central-difference coefficients of the Laplacian */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        int t1, t2, t3, t4, t5, t6, t7, t8;
        register int lbv, ubv;
        /* Start of CLooG code: time-skewed, tiled sweep over (t,z,y,x).
           t2 tiles are independent for a fixed t1, hence the parallel for. */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=Nt-1;t1++) {
                int lbp=ceild(t1+1,2);
                int ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) {
                        for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(24*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(24*t3+Nx+11,32));t4++) {
                            for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),8*t4+6);t5++) {
                                for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                                        lbv=max(32*t4,4*t5+4);
                                        ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            /* leapfrog update: A[next] = 2*A[cur] - A[prev] + roc2 * Laplacian8(A[cur]) */
                                            A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    /* Free allocated arrays (including the outer A pointer and tile_size,
       which the original leaked). */
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);
    free(tile_size);
    return 0;
}
|
lu.pluto.par.l2tile.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
double L[N][N];
double U[N][N];
double A[N][N+13];
/* Fill the global L (lower-triangular), U (upper-triangular) and
 * A = L*U matrices. A must be the product of two well-formed triangular
 * factors so the subsequent LU factorization never divides by zero.
 * A is accumulated with += and relies on static zero-initialization. */
void init_arrays()
{
    int row, col, idx;
    /* zero both triangular factors first */
    for (row = 0; row < N; row++) {
        for (col = 0; col < N; col++) {
            L[row][col] = 0.0;
            U[row][col] = 0.0;
        }
    }
    /* L gets the lower triangle, U the mirrored upper triangle */
    for (row = 0; row < N; row++) {
        for (col = 0; col <= row; col++) {
            L[row][col] = row + col + 1;
            U[col][row] = row + col + 1;
        }
    }
    /* A = L * U, classic triple loop */
    for (row = 0; row < N; row++) {
        for (col = 0; col < N; col++) {
            for (idx = 0; idx < N; idx++) {
                A[row][col] += L[row][idx] * U[idx][col];
            }
        }
    }
}
/* Return the current wall-clock time in seconds as a double.
 * Fixes vs. the original: the unused `stat` local is removed, the obsolete
 * timezone argument is passed as NULL instead of an uninitialized struct,
 * and the gettimeofday() return value is checked. */
double rtclock()
{
    struct timeval tp;
    if (gettimeofday(&tp, NULL) != 0) {
        /* extremely unlikely; keep the caller's arithmetic well-defined */
        return 0.0;
    }
    return (tp.tv_sec + tp.tv_usec * 1.0e-6);
}
/* Benchmark driver: times REPS runs of a PLUTO-tiled, OpenMP-parallel
 * LU factorization (no pivoting) of the global N x (N+13) array A,
 * and prints the average time (or the factored matrix when TEST is set). */
int main()
{
    init_arrays();
    double annot_t_start=0, annot_t_end=0, annot_t_total=0;
    int annot_i;
    for (annot_i=0; annot_i<REPS; annot_i++)
    {
        annot_t_start = rtclock();
/* NOTE(review): the #includes and macro redefinitions below sit inside the
 * function body because this region was pasted in by a source-to-source
 * tool; the ceild/floord/max/min definitions duplicate the file-level
 * macros verbatim, so the redefinitions are benign. */
#include <math.h>
#include <assert.h>
#include <omp.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
        /* c1..c9: polyhedral tile / point loop iterators generated by CLooG */
        int c1, c2, c3, c4, c5, c6, c7, c8, c9;
        register int lb, ub, lb1, ub1, lb2, ub2;
        register int lbv, ubv;
        if (N >= 2) {
            /* c1 sweeps time-skewed wavefronts; c2 tiles within a wavefront
             * are independent, hence the parallel for below. */
            for (c1=-1;c1<=floord(2*N-3,256);c1++) {
                lb1=max(max(ceild(256*c1-N+2,256),0),ceild(128*c1-127,256));
                ub1=min(floord(256*c1+255,256),floord(N-1,256));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
                for (c2=lb1; c2<=ub1; c2++) {
                    for (c3=max(ceild(128*c1-128*c2-32385,32640),ceild(128*c1-128*c2-127,128));c3<=floord(N-1,256);c3++) {
                        for (c4=max(max(8*c1-8*c2,0),8*c1-8*c2-1792*c3-1778);c4<=min(min(min(min(floord(N-2,32),floord(128*c2+127,16)),floord(3968*c3+3937,16)),8*c1-8*c2+7),floord(128*c3+127,16));c4++) {
                            for (c5=max(max(0,ceild(16*c4-15,16)),8*c2);c5<=min(8*c2+7,floord(N-1,32));c5++) {
                                for (c6=max(max(max(max(ceild(16*c4-465,496),ceild(8*c1-8*c2-8*c3-c4-217,223)),ceild(-8*c1+8*c2+8*c3+c4-217,225)),8*c3),ceild(16*c4-15,16));c6<=min(floord(N-1,32),8*c3+7);c6++) {
                                    /* Diagonal tile (c4 == c6): scale the pivot row,
                                     * then eliminate below it within the tile. */
                                    if ((c1 == c2+c3) && (c4 == c6)) {
                                        for (c7=max(0,32*c6);c7<=min(min(N-2,32*c5+30),32*c6+30);c7++) {
                                            {
                                                /* pivot-row division: A[c7][*] /= A[c7][c7] */
                                                lbv=max(c7+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                for (c9=lbv; c9<=ubv; c9++) {
                                                    {A[c7][c9]=A[c7][c9]/A[c7][c7];} ;
                                                }
                                            }
                                            /* rank-1 update of the rows below the pivot */
                                            for (c8=c7+1;c8<=min(N-1,32*c6+31);c8++) {
                                                {
                                                    lbv=max(32*c5,c7+1); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][c7]*A[c7][c9];} ;
                                                    }
                                                }
                                            }
                                        }
                                    }
                                    /* Off-diagonal update tile: 4x4 register-blocked
                                     * elimination, with remainder loops below. */
                                    {
                                        for (c7 = max(0, 32 * c4); c7 <= min(min(32 * c6 - 1, 32 * c5 + 30), 32 * c4 + 31) - 3; c7 = c7 + 4) {
                                            for (c8 = 32 * c6; c8 <= min(N - 1, 32 * c6 + 31) - 3; c8 = c8 + 4) {
                                                /* unrolled over pivots c7..c7+3 and rows c8..c8+3 */
                                                {
                                                    lbv=max(c7+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][c7]*A[c7][c9];};
                                                        {A[(c8 + 1)][c9]=A[(c8 + 1)][c9]-A[(c8 + 1)][c7]*A[c7][c9];};
                                                        {A[(c8 + 2)][c9]=A[(c8 + 2)][c9]-A[(c8 + 2)][c7]*A[c7][c9];};
                                                        {A[(c8 + 3)][c9]=A[(c8 + 3)][c9]-A[(c8 + 3)][c7]*A[c7][c9];};
                                                    }
                                                }
                                                {
                                                    lbv=max((c7+1)+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][(c7 + 1)]*A[(c7 + 1)][c9];};
                                                        {A[(c8 + 1)][c9]=A[(c8 + 1)][c9]-A[(c8 + 1)][(c7 + 1)]*A[(c7 + 1)][c9];};
                                                        {A[(c8 + 2)][c9]=A[(c8 + 2)][c9]-A[(c8 + 2)][(c7 + 1)]*A[(c7 + 1)][c9];};
                                                        {A[(c8 + 3)][c9]=A[(c8 + 3)][c9]-A[(c8 + 3)][(c7 + 1)]*A[(c7 + 1)][c9];};
                                                    }
                                                }
                                                {
                                                    lbv=max((c7+2)+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][(c7 + 2)]*A[(c7 + 2)][c9];};
                                                        {A[(c8 + 1)][c9]=A[(c8 + 1)][c9]-A[(c8 + 1)][(c7 + 2)]*A[(c7 + 2)][c9];};
                                                        {A[(c8 + 2)][c9]=A[(c8 + 2)][c9]-A[(c8 + 2)][(c7 + 2)]*A[(c7 + 2)][c9];};
                                                        {A[(c8 + 3)][c9]=A[(c8 + 3)][c9]-A[(c8 + 3)][(c7 + 2)]*A[(c7 + 2)][c9];};
                                                    }
                                                }
                                                {
                                                    lbv=max((c7+3)+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][(c7 + 3)]*A[(c7 + 3)][c9];};
                                                        {A[(c8 + 1)][c9]=A[(c8 + 1)][c9]-A[(c8 + 1)][(c7 + 3)]*A[(c7 + 3)][c9];};
                                                        {A[(c8 + 2)][c9]=A[(c8 + 2)][c9]-A[(c8 + 2)][(c7 + 3)]*A[(c7 + 3)][c9];};
                                                        {A[(c8 + 3)][c9]=A[(c8 + 3)][c9]-A[(c8 + 3)][(c7 + 3)]*A[(c7 + 3)][c9];};
                                                    }
                                                }
                                            }
                                            /* remainder rows (c8) for this group of four pivots */
                                            for (; c8 <= min(N - 1, 32 * c6 + 31); c8 = c8 + 1) {
                                                {
                                                    lbv=max(c7+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][c7]*A[c7][c9];};
                                                    }
                                                }
                                                {
                                                    lbv=max((c7+1)+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][(c7 + 1)]*A[(c7 + 1)][c9];};
                                                    }
                                                }
                                                {
                                                    lbv=max((c7+2)+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][(c7 + 2)]*A[(c7 + 2)][c9];};
                                                    }
                                                }
                                                {
                                                    lbv=max((c7+3)+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                    for (c9=lbv; c9<=ubv; c9++) {
                                                        {A[c8][c9]=A[c8][c9]-A[c8][(c7 + 3)]*A[(c7 + 3)][c9];};
                                                    }
                                                }
                                            }
                                        }
                                        /* remainder pivots (c7) not covered by the 4-way unroll */
                                        for (; c7 <= min(min(32 * c6 - 1, 32 * c5 + 30), 32 * c4 + 31); c7 = c7 + 1) {
                                            for (c8 = 32 * c6; c8 <= min(N - 1, 32 * c6 + 31) - 3; c8 = c8 + 4)
                                            {
                                                lbv=max(c7+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                for (c9=lbv; c9<=ubv; c9++) {
                                                    {A[c8][c9]=A[c8][c9]-A[c8][c7]*A[c7][c9];};
                                                    {A[(c8 + 1)][c9]=A[(c8 + 1)][c9]-A[(c8 + 1)][c7]*A[c7][c9];};
                                                    {A[(c8 + 2)][c9]=A[(c8 + 2)][c9]-A[(c8 + 2)][c7]*A[c7][c9];};
                                                    {A[(c8 + 3)][c9]=A[(c8 + 3)][c9]-A[(c8 + 3)][c7]*A[c7][c9];};
                                                }
                                            }
                                            for (; c8 <= min(N - 1, 32 * c6 + 31); c8 = c8 + 1)
                                            {
                                                lbv=max(c7+1,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                                for (c9=lbv; c9<=ubv; c9++) {
                                                    {A[c8][c9]=A[c8][c9]-A[c8][c7]*A[c7][c9];};
                                                }
                                            }
                                        }
                                    }
                                    /* Last row of the diagonal tile: divide by its pivot. */
                                    if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(N-33,32),floord(32*c5-1,32)))) {
                                        {
                                            lbv=max(32*c4+32,32*c5); ubv=min(N-1,32*c5+31);
#pragma ivdep
#pragma vector always
                                            for (c9=lbv; c9<=ubv; c9++) {
                                                {A[32*c4+31][c9]=A[32*c4+31][c9]/A[32*c4+31][32*c4+31];} ;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        annot_t_end = rtclock();
        annot_t_total += annot_t_end - annot_t_start;
    }
    /* report the mean over all repetitions */
    annot_t_total = annot_t_total / REPS;
#ifndef TEST
    printf("%f\n", annot_t_total);
#else
    /* TEST build: dump the factored matrix for result verification */
    {
        int i, j;
        for (i=0; i<N; i++) {
            for (j=0; j<N; j++) {
                if (j%100==0)
                    printf("\n");
                printf("%f ",A[i][j]);
            }
            printf("\n");
        }
    }
#endif
    return ((int) A[0][0]);
}
|
zherk.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_herk
*
* Performs one of the Hermitian rank k operations
*
* \f[ C = \alpha A \times A^H + \beta C, \f]
* or
* \f[ C = \alpha A^H \times A + \beta C, \f]
*
* where alpha and beta are real scalars, C is an n-by-n Hermitian
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaConjTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaConjTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaConjTrans, lda >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_zherk
* @sa plasma_cherk
*
******************************************************************************/
int plasma_zherk(plasma_enum_t uplo, plasma_enum_t trans,
                 int n, int k,
                 double alpha, plasma_complex64_t *pA, int lda,
                 double beta, plasma_complex64_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -4;
    }
    // Dimensions of A depend on the requested operation:
    // NoTrans computes A*A^H (A is n-by-k), ConjTrans computes A^H*A (k-by-n).
    int am, an;
    if (trans == PlasmaNoTrans) {
        am = n;
        an = k;
    }
    else {
        am = k;
        an = n;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, n)) {
        plasma_error("illegal value of ldc");
        return -10;
    }
    // quick return
    if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;
    // Set tiling parameters.
    int nb = plasma->nb;
    // Initialize tile matrix descriptors.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Free the tile descriptors allocated above; the original code
        // leaked both A and C on this error path.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }
    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pC, ldc, C, sequence, &request);
        // Call the tile async function.
        plasma_omp_zherk(uplo, trans,
                         alpha, A,
                         beta, C,
                         sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(C, pC, ldc, sequence, &request);
    }
    // implicit synchronization
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);
    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_herk
*
* Performs rank k update.
* Non-blocking tile version of plasma_zherk().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zherk
* @sa plasma_omp_zherk
* @sa plasma_omp_cherk
* @sa plasma_omp_dherk
* @sa plasma_omp_sherk
*
******************************************************************************/
void plasma_omp_zherk(plasma_enum_t uplo, plasma_enum_t trans,
                      double alpha, plasma_desc_t A,
                      double beta, plasma_desc_t C,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request FIRST: every other error path below
    // reports failure through plasma_request_fail(), which dereferences
    // them. The original code performed these NULL checks last, after
    // already passing the (possibly NULL) pointers to plasma_request_fail().
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return: nothing to do when C is empty or the update is identity
    int k = trans == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;
    // Call the parallel function.
    plasma_pzherk(uplo, trans,
                  alpha, A,
                  beta, C,
                  sequence, request);
}
|
helper_classes_for_constraint_builder.h | // ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Aditya Ghantasala
//
//
#if !defined(AUXILIARY_GLOBAL_MASTER_SLAVE_RELATION)
#define AUXILIARY_GLOBAL_MASTER_SLAVE_RELATION
// System includes
#include <vector>
#include <unordered_map>
// project includes
#include "includes/define.h"
#include "includes/dof.h"
#include "includes/node.h"
#include "includes/lock_object.h"
namespace Kratos
{
namespace Internals
{
///@name Internals Globals
///@{
///@}
///@name Type Definitions
///@{
/// Geometric definitions
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
/// Matrix and vector definition
typedef Kratos::Matrix MatrixType;
typedef Kratos::Vector VectorType;
/// Indexes definition
typedef IndexedObject::IndexType IndexType;
typedef std::vector<IndexType> VectorIndexType;
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
/**
* @brief this method checks if any of the nodes of the given rGeometry is marked SLAVE.
* @param rGeometry The geometry to check for.
*/
/**
 * @brief Checks whether the given geometry contains a node marked SLAVE.
 * @param rGeometry The geometry to check for.
 * @note Returns the SLAVE status of the FIRST node on which the flag is
 *       defined (even if that status is false); false if no node defines it.
 */
bool HasSlaveNode(GeometryType& rGeometry)
{
    for (auto it_node = rGeometry.begin(); it_node != rGeometry.end(); ++it_node) {
        if (it_node->IsDefined(SLAVE)) {
            return it_node->Is(SLAVE);
        }
    }
    return false;
}
/**
* @brief This function resizes the given matrix and vector pair to the new size provided.
* And Initializes the extra part added to zero.
* @param rMatrix matrix to be resized
* @param rVector vector to be resized
* @param FinalSize the final size of the resized quantities.
*/
/**
 * @brief Grows rMatrix/rVector to FinalSize, preserving the existing
 *        initial_sys_size x initial_sys_size contents and zero-filling the
 *        newly added rows/columns/entries.
 * @param rMatrix matrix to be resized (square, same size as rVector)
 * @param rVector vector to be resized
 * @param FinalSize the final size of the resized quantities.
 */
void ResizeAndInitializeLocalMatrices(MatrixType& rMatrix, VectorType& rVector,
                        IndexType FinalSize)
{
    KRATOS_TRY
    // storing the initial matrix and vector and their properties
    KRATOS_ERROR_IF(rMatrix.size1() != rVector.size())<<"ResizeAndInitializeLocalMatrices :: Dimension of the matrix and vector passed are not the same !"<<std::endl;
    const IndexType initial_sys_size = rMatrix.size1();
    // Deep-copy the current contents: resize() below is called with
    // preserve == false and would otherwise discard them.
    MatrixType matrix(initial_sys_size, initial_sys_size);
    noalias(matrix) = rMatrix;
    VectorType vector(initial_sys_size);
    noalias(vector) = rVector;
    // Resize without preserving (third/second argument false); contents are
    // restored explicitly below.
    rMatrix.resize(FinalSize, FinalSize, false);
    rVector.resize(FinalSize, false);
    // reassigning the original part of the matrix
    for (IndexType m = 0; m < initial_sys_size; ++m)
    {
        for (IndexType n = 0; n < initial_sys_size; ++n)
        {
            rMatrix(m,n) = matrix(m,n);
        }
        rVector(m) = vector(m);
    }
    // Making the extra part of matrix zero
    // (zeroes both the new rows (m,n) and the new columns (n,m))
    for (IndexType m = initial_sys_size; m < FinalSize; ++m)
    {
        for (IndexType n = 0; n < FinalSize; ++n)
        {
            rMatrix(m, n) = 0.0;
            rMatrix(n, m) = 0.0;
        }
        rVector(m) = 0.0;
    }
    KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::ResizeAndInitializeLocalMatrices failed ..");
}
///@}
///@name Internals Classes
///@{
/**
* @class AuxiliaryGlobalMasterSlaveConstraint
* @ingroup KratosCore
* @brief This class stores the information regarding the AuxiliaryGlobalMasterSlaveConstraint equation.
* Naming convention is defined like this. (each object of this class will store one equation in the given form
*
* SlaveEquationId = w_1*MasterEquationId_1 + w_2*MasterEquationId_2 + ..... + w_n*MasterEquationId_n
*
* This stores the condensed form of the MasterSlaveConstraint objects into one object. if only one relation for a slave is added as
* MasterSlaveConstraint then there will only be one entry for master for its corresponding AuxiliaryGlobalMasterSlaveConstraint.
* Currently this class is designed to hold only one equation. There is only one unique object of this class for each slave.
*
* Future plan is to also make it possible to work with matrices (T) and vectors (for slave and master equation ids and constants)
*
*
* IMPORTANT : This is not seen by the user. This is a helper data structure which is exists only in the builder and solver.
*
* @author Aditya Ghantasala
*/
class AuxiliaryGlobalMasterSlaveConstraint : public IndexedObject
{
public:
    ///@name Type Definitions
    ///@{
    typedef IndexedObject BaseType;
    typedef Internals::IndexType IndexType;
    typedef Internals::MatrixType MatrixType;
    typedef Internals::VectorType VectorType;
    typedef std::vector<IndexType> EquationIdVectorType;
    /// Pointer definition of AuxiliaryGlobalMasterSlaveConstraint
    KRATOS_CLASS_POINTER_DEFINITION(AuxiliaryGlobalMasterSlaveConstraint);
    ///@}
    ///@name Life Cycle
    ///@{
    /**
     * @brief Constructor of the class
     * @param SlaveEquationId the slave equation id for which this class is being constructed.
     */
    explicit AuxiliaryGlobalMasterSlaveConstraint(IndexType SlaveEquationId = 0) : IndexedObject(SlaveEquationId),
                                                                                  mLhsValue(0.0),
                                                                                  mRhsValue(0.0)
    {
    }
    ///@}
    ///@name Operators
    ///@{
    ///@}
    ///@name Operations
    ///@{
    /**
     * @brief Function to get the slave equation Id corresponding to this constraint.
     * @return the slave equation id (stored as the object's Id).
     */
    IndexType SlaveEquationId() const { return this->Id(); }
    /**
     * @brief Function to set the lefthand side of the constraint (the slave dof value)
     * @param LhsValue the value of the lhs (the slave dof value)
     */
    void SetLeftHandSide(const double LhsValue)
    {
        mLockObject.SetLock();
        mLhsValue = LhsValue;
        mLockObject.UnSetLock();
    }
    /**
     * @brief Function to set the righthand side of the constraint (the combination of all the master dof values and constants)
     * @param RhsValue the value of the rhs
     */
    void SetRightHandSide(const double RhsValue)
    {
        // Locked for consistency with SetLeftHandSide()/UpdateRightHandSide():
        // the original left this write unguarded, racing with concurrent
        // UpdateRightHandSide() calls during parallel assembly.
        mLockObject.SetLock();
        mRhsValue = RhsValue;
        mLockObject.UnSetLock();
    }
    /**
     * @brief Atomically accumulates an update into the righthand side.
     * @param RhsValueUpdate the increment to add to the rhs
     */
    void UpdateRightHandSide(const double RhsValueUpdate)
    {
        mLockObject.SetLock();
        mRhsValue = mRhsValue + RhsValueUpdate;
        mLockObject.UnSetLock();
    }
    /// Get number of masters for this slave
    IndexType NumberOfMasters() const
    {
        return mMasterEquationIdVector.size();
    }
    /**
     * @brief this determines the master equation IDs connected to this constraint
     * @param rSlaveEquationId output: the slave equation id
     * @param rMasterEquationIds output: the master equation ids
     */
    virtual void EquationIdsVector(IndexType& rSlaveEquationId,
                                   EquationIdVectorType& rMasterEquationIds)
    {
        if (rMasterEquationIds.size() != mMasterEquationIdVector.size())
            rMasterEquationIds.resize(this->NumberOfMasters(), false);
        rSlaveEquationId = this->SlaveEquationId();
        rMasterEquationIds = mMasterEquationIdVector;
    }
    /**
     * @brief this is called during the assembling process in order
     * to calculate all elemental contributions to the global system
     * matrix and the right hand side
     * @param rMasterWeightsVector the elemental left hand side matrix
     * @param rConstant the elemental right hand side
     */
    virtual void CalculateLocalSystem(VectorType &rMasterWeightsVector,
                                      double &rConstant)
    {
        if (rMasterWeightsVector.size() != this->NumberOfMasters())
            rMasterWeightsVector.resize(this->NumberOfMasters(), false);
        for (IndexType i = 0; i < this->NumberOfMasters(); ++i)
            rMasterWeightsVector(i) = mMasterWeightsVector[i];
        /// Here this is required because, when in the builder and solver , we are actually imposing the constraint on the update
        /// of the DOF value (residual formulation), this does not necessarily guarantee the DOFs themselves follow the constraint equation.
        /// So, we calculate the LHS value and RHS value of the constraint equation (with DOF values) and if they are not
        /// satisfying the constraint, we use the residual as the constant.
        rConstant = mRhsValue - mLhsValue;
    }
    /**
     * @brief This method clears the equations ids and weights
     */
    void Clear()
    {
        //clearing the contents
        mMasterEquationIdVector.clear();
        mMasterWeightsVector.clear();
    }
    /**
     * @brief This method adds a new master (or accumulates the weight if the
     *        master equation id already exists).
     */
    void AddMaster(const IndexType MasterEquationId, const double Weight)
    {
        // NOTE(review): the position lookup below is performed outside the
        // lock, so a concurrent push_back may invalidate it — confirm the
        // builder only calls AddMaster for distinct slaves per thread.
        const int index = GetMasterEquationIdPosition(MasterEquationId);
        if (index >= 0) {
#pragma omp atomic
            mMasterWeightsVector[index] += Weight;
        } else {
            mLockObject.SetLock(); // locking for exclusive access to the vectors mMasterEquationIdVector and mMasterWeightsVectors
            mMasterEquationIdVector.push_back(MasterEquationId);
            mMasterWeightsVector.push_back(Weight);
            mLockObject.UnSetLock(); // unlocking
        }
    }
    /**
     * @brief This method resets the LHS/RHS relationship
     */
    void Reset()
    {
        this->mLhsValue = 0.0;
        this->mRhsValue = 0.0;
    }
    /**
     * @brief This method returns the position of the given master equation id
     *        in the master vectors, or -1 if it is not present.
     */
    int GetMasterEquationIdPosition(const IndexType MasterEquationId) const
    {
        auto it = find(mMasterEquationIdVector.begin(), mMasterEquationIdVector.end(), MasterEquationId);
        if (it != mMasterEquationIdVector.end())
            return it - mMasterEquationIdVector.begin();
        else
            return -1;
    }
    ///@}
    ///@name Inquiry
    ///@{
    ///@}
    ///@name Input and output
    ///@{
    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "AuxiliaryGlobalMasterSlaveConstraint # " << this->Id();
        return buffer.str();
    }
    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }
    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
    }
private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    double mLhsValue;                          // current value of the slave dof (constraint LHS)
    double mRhsValue;                          // accumulated master combination (constraint RHS)
    EquationIdVectorType mMasterEquationIdVector;  // master equation ids, parallel to the weights
    std::vector<double> mMasterWeightsVector;      // one weight per master equation id
    LockObject mLockObject;                    // guards concurrent access during parallel assembly
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    ///@}
    ///@name Serialization
    ///@{
    friend class Serializer;
    void save(Serializer &rSerializer) const override
    {
        // No need to save anything from this class as they will be reconstructed
        KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, IndexedObject);
    }
    void load(Serializer &rSerializer) override
    {
        // No need to load anything from this class as they will be reconstructed
        KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, IndexedObject);
    }
    ///@}
}; // End of ConstraintEquation class
/**
* @struct LocalIndices
* @ingroup KratosCore
* @brief This class stores the stores three different vectors of local internal, slave, master indices
* which are used in constraint builder and solver.
*
* @author Aditya Ghantasala
*/
struct LocalIndices
{
typedef Internals::IndexType IndexType;
typedef Internals::VectorIndexType VectorIndexType;
void Reset()
{
internal_index_vector.resize(0);
master_index_vector.resize(0);
slave_index_vector.resize(0);
container_master_weights.resize(0);
container_master_slaves.resize(0);
processed_master_indices.resize(0);
}
VectorIndexType internal_index_vector; // indicies corresponding to internal DOFs
VectorIndexType master_index_vector; // indicies corresponding to master DOFs
VectorIndexType slave_index_vector; // indicies corresponding to slave DOFs
std::vector<double> container_master_weights; // list of master weights in the order in which they are processed
std::vector<IndexType> container_master_slaves; // list of slave indices corresponding to each master processed
std::vector<IndexType> processed_master_indices; // list of master indices in the order in which they are processed.
};
///@}
///@name Type Definitions
///@{
/// AuxiliaryGlobalMasterSlaveConstraint definitions
typedef Internals::AuxiliaryGlobalMasterSlaveConstraint AuxiliaryGlobalMasterSlaveConstraintType;
//typedef PointerVectorSet<AuxiliaryGlobalMasterSlaveConstraint, IndexedObject> GlobalMasterSlaveRelationContainerType;
typedef std::unordered_map< IndexType, unique_ptr< AuxiliaryGlobalMasterSlaveConstraintType > > GlobalMasterSlaveRelationContainerType;
///@}
///@name Internal Classes
///@{
/**
* @class ConstraintImposer
* @ingroup KratosCore
* @author Aditya Ghantasala
*/
template <class TSparseSpace,
          class TDenseSpace,
          class TLinearSolver
          > // Made template to include the possibility to work with both local and global matrices for imposing the constraints.
class ConstraintImposer {
public:
    ///@name Type Definitions
    ///@{
    typedef Internals::AuxiliaryGlobalMasterSlaveConstraint AuxiliaryGlobalMasterSlaveRelationType;
    typedef std::unordered_map< IndexType, unique_ptr< AuxiliaryGlobalMasterSlaveRelationType > > GlobalMasterSlaveRelationContainerType;
    typedef std::vector<Dof<double>::Pointer> DofsVectorType;
    typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
    typedef typename TDenseSpace::VectorType LocalSystemVectorType;
    typedef Internals::LocalIndices LocalIndicesType;
    typedef Kratos::Matrix MatrixType;
    typedef Kratos::Vector VectorType;
    typedef std::vector<IndexType> VectorIndexType;
    typedef std::vector<IndexType> EquationIdVectorType;
    ///@}
    ///@name Life Cycle
    ///@{
    /// Constructor: stores a reference to the global master-slave relation container (not owned).
    explicit ConstraintImposer(GlobalMasterSlaveRelationContainerType& rGlobalMasterSlaveRelations)
        : mrGlobalMasterSlaveConstraints(rGlobalMasterSlaveRelations)
    {
    }
    ~ConstraintImposer()
    {
    }
    /// Copy constructor: both instances refer to the SAME global relation container.
    ConstraintImposer( const ConstraintImposer &OtherObject) :
        mrGlobalMasterSlaveConstraints (OtherObject.mrGlobalMasterSlaveConstraints) // copy constructor
    {
    }
    ///@}
    ///@name Operators
    ///@{
    ///@}
    ///@name Operations
    ///@{
    /**
     * @brief This adds the equation IDs of masters of all the slaves corresponding to pCurrentElement to EquationIds
     * @details Here cannot use the pure Geometry because, we would need the dof list from the element/geometry.
     *          Master equation ids are APPENDED at the end of rEquationIds; downstream code
     *          (CalculateLocalMasterIndices) relies on this trailing placement.
     * @param rCurrentContainer the element or condition where the rEquationIds to be modified for master-slave constraints
     * @param rEquationIds the equation id vector for the above element or condition
     * @param rCurrentProcessInfo the current process info
     */
    template <typename TContainerType>
    void ApplyConstraints(TContainerType& rCurrentContainer,
                          typename TContainerType::EquationIdVectorType& rEquationIds,
                          ProcessInfo& rCurrentProcessInfo)
    {
        KRATOS_TRY
        this->Reset();
        // If no slave is found for this container , no need of going on
        if (! Internals::HasSlaveNode(rCurrentContainer.GetGeometry()))
        {
            return;
        }
        DofsVectorType ContainerDofs;
        rCurrentContainer.GetDofList(ContainerDofs, rCurrentProcessInfo);
        IndexType slave_equation_id;
        // For each dof check if it is a slave or not. If it is, its masters' ids are appended.
        for (IndexType j = 0; j < ContainerDofs.size(); j++)
        {
            slave_equation_id = ContainerDofs[j]->EquationId(); // consider everything as a slave.
            // Get the global constraint equation for this slave.
            auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(slave_equation_id);
            if (global_master_slave_constraint != mrGlobalMasterSlaveConstraints.end())
            { // if a equation exists for this slave
                global_master_slave_constraint->second->EquationIdsVector(slave_equation_id, mMasterEquationIds); // get the slave and master equation ids for this slave.
                // NOTE(review): reserve() is called with only the master count, not the final size
                // (rEquationIds.size() + mMasterEquationIds.size()); harmless since reserve never shrinks.
                rEquationIds.reserve(mMasterEquationIds.size());
                for (auto &master_eq_id : mMasterEquationIds)
                {
                    // Add the current slaves master eq ids to the equation ids
                    rEquationIds.push_back(master_eq_id);
                }
            }
        }
        KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::ApplyConstraints failed ..");
    }
    /**
     * @brief This function modifies the LHS and RHS of the rCurrentContainer to account for any master-slave constraints its nodes/dofs
     *        are carrying.
     * @details Here cannot use the pure Geometry because, we would need the dof list from the element/geometry.
     * @param rCurrentContainer the element or condition where the rEquationIds to be modified for master-slave constraints
     * @param rLHSContribution the LHS contribution of the rCurrentContainer
     * @param rRHSContribution the RHS contribution of the rCurrentContainer
     * @param rEquationIds the equation id vector for the above element or condition
     * @param rCurrentProcessInfo the current process info
     */
    template <typename TContainerType>
    void ApplyConstraints(TContainerType& rCurrentContainer,
                          LocalSystemMatrixType& rLHSContribution,
                          LocalSystemVectorType& rRHSContribution,
                          typename TContainerType::EquationIdVectorType& rEquationIds,
                          ProcessInfo& rCurrentProcessInfo)
    {
        KRATOS_TRY
        // If no slave is found for this container , no need of going on
        if (! Internals::HasSlaveNode(rCurrentContainer.GetGeometry()))
            return;
        this->Reset();
        // Saving the original system size
        const IndexType initial_sys_size = rLHSContribution.size1();
        // first fill in the rEquationIds using the above function (overloaded one)
        ApplyConstraints<TContainerType>(rCurrentContainer, rEquationIds, rCurrentProcessInfo); // now rEquationIds has all the master equation ids appended to it.
        IndexType total_number_of_masters = rEquationIds.size() - initial_sys_size;
        // Calculating the local indices corresponding to internal, master, slave dofs of this container
        CalculateLocalIndices(rEquationIds, mLocalIndices, total_number_of_masters);
        // resizing the matrices to the new required length
        ResizeAndInitializeLocalMatrices(rLHSContribution, rRHSContribution, rEquationIds.size());
        // Calculating the F = T'*(F-K*g) which is local to this container
        ModifyRHSForConstraints(rLHSContribution, rRHSContribution, rEquationIds);
        // Calculating the K = T' * K *T which is local to this container
        ModifyLHSForConstraints(rLHSContribution, rRHSContribution, rEquationIds);
        KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise:: Applying Multipoint constraints failed ..");
    }
    ///@}
private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    /// Global slave-equation-id -> constraint map shared with the builder (not owned).
    GlobalMasterSlaveRelationContainerType& mrGlobalMasterSlaveConstraints;
    // For Formulating which are the internal, slave indices locally.
    LocalIndicesType mLocalIndices;
    // container's transformation matrix and constant vector
    MatrixType mTransformationMatrixLocal;
    VectorType mConstantVectorLocal;
    // containers for holding equation ids and container dofs
    EquationIdVectorType mMasterEquationIds;
    DofsVectorType mContainerDofs;
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    /**
     * @brief This function does two operations : K = T' * K * T . This operations are done in place.
     *        Meaning that there is no memory duplication and no explicit matrix and matrix or matrix vector multiplication.
     *        Individual entries of K and F are modified to achieve the result.
     * @param rLHSContribution The lhs matrix of the container
     * @param rRHSContribution The rhs vector of the container
     * @param rEquationIds the list of equation ids (extended with the masters).
     */
    void ModifyLHSForConstraints(MatrixType &rLHSContribution, VectorType& rRHSContribution, EquationIdVectorType &rEquationIds)
    {
        mLocalIndices.container_master_weights.reserve(mLocalIndices.master_index_vector.size());
        mLocalIndices.container_master_slaves.reserve(mLocalIndices.master_index_vector.size());
        mLocalIndices.processed_master_indices.reserve(mLocalIndices.master_index_vector.size());
        IndexType slave_equation_id;
        EquationIdVectorType master_equation_ids;
        VectorType master_weights_vector;
        double slave_constant;
        for (auto& slave_index : mLocalIndices.slave_index_vector) { // Loop over all the slaves for this container
            // Get the global equation for this constraint. The find() always succeeds here:
            // slave_index_vector was built from ids present in the map (CalculateLocalSlaveIndices).
            auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(rEquationIds[slave_index]);
            // Get the tranformation matrix and constant_vector from the current slave
            global_master_slave_constraint->second->EquationIdsVector(slave_equation_id, master_equation_ids);
            global_master_slave_constraint->second->CalculateLocalSystem(master_weights_vector, slave_constant);
            IndexType master_index = 0;
            double master_weight = 0.0;
            IndexType i_master = 0;
            for (auto& master_eq_id : master_equation_ids)
            { // Loop over all the masters the slave has
                // Locate the master's local column by searching its equation id in the extended id vector.
                master_index = std::distance(rEquationIds.begin(), std::find(rEquationIds.begin(), rEquationIds.end(), master_eq_id));
                //master_weight = mTransformationMatrixLocal(slave_index,master_index);
                master_weight = master_weights_vector(i_master);
                for (auto& internal_index : mLocalIndices.internal_index_vector) {
                    // For K(m,u) and K(u,m)
                    rLHSContribution(internal_index, master_index) += rLHSContribution(internal_index, slave_index) * master_weight;
                    rLHSContribution(master_index, internal_index) += rLHSContribution(slave_index, internal_index) * master_weight;
                }
                mLocalIndices.container_master_weights.push_back( master_weight );
                mLocalIndices.container_master_slaves.push_back( slave_index );
                mLocalIndices.processed_master_indices.push_back( master_index );
                i_master++;
            } // Loop over all the masters the slave has
        }
        //Adding contribution from slave to Kmm
        IndexType master_i = 0;
        for (auto& master_index : mLocalIndices.processed_master_indices) {
            IndexType master_i_other = 0;
            for (auto& master_index_other : mLocalIndices.processed_master_indices) {
                rLHSContribution(master_index, master_index_other) += mLocalIndices.container_master_weights[master_i] *
                                                                      rLHSContribution(mLocalIndices.container_master_slaves[master_i], mLocalIndices.container_master_slaves[master_i_other])
                                                                      * mLocalIndices.container_master_weights[master_i_other];
                master_i_other++;
            }
            master_i++;
        }
        // For K(u,s) and K(s,u). This is to be done at the end only
        for (auto& slave_index : mLocalIndices.slave_index_vector) {
            for (auto& internal_index : mLocalIndices.internal_index_vector) {
                rLHSContribution(slave_index, internal_index) = 0.0;
                rLHSContribution(internal_index, slave_index) = 0.0;
            }
        }
    }
    /**
     * @brief This function does two operation : F = T'*(F-K*b). This operation is done in place.
     *        Meaning that there is no memory duplication and no explicit matrix and matrix or matrix vector multiplication.
     *        Individual entries of K and F are modified to achieve the result.
     * @param rLHSContribution The lhs matrix of the container
     * @param rRHSContribution The rhs vector of the container
     * @param rEquationIds the list of equation ids (extended with the masters).
     */
    void ModifyRHSForConstraints(MatrixType &rLHSContribution, VectorType& rRHSContribution, EquationIdVectorType &rEquationIds)
    {
        IndexType slave_equation_id;
        EquationIdVectorType master_equation_ids;
        VectorType master_weights_vector;
        double slave_constant;
        VectorType master_weights_vector_other;
        double constant_other;
        for (auto& slave_index : mLocalIndices.slave_index_vector) { // Loop over all the slaves for this container
            // Get the global equation for this constraint
            auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(rEquationIds[slave_index]);
            // Get the tranformation matrix and constant_vector from the current slave
            global_master_slave_constraint->second->EquationIdsVector(slave_equation_id, master_equation_ids);
            global_master_slave_constraint->second->CalculateLocalSystem(master_weights_vector, slave_constant);
            IndexType master_index = 0;
            double master_weight = 0.0;
            IndexType i_master = 0;
            for (auto& master_eq_id : master_equation_ids)
            { // Loop over all the masters the slave has
                master_index = std::distance(rEquationIds.begin(), std::find(rEquationIds.begin(), rEquationIds.end(), master_eq_id));
                //master_weight = mTransformationMatrixLocal(slave_index,master_index);
                master_weight = master_weights_vector(i_master);
                // NOTE(review): this F(u) -= K(u,s)*g correction sits inside the master loop, so a
                // slave with more than one master applies it once per master — confirm this repetition
                // is intended before changing (single-master slaves are unaffected).
                for (auto& internal_index : mLocalIndices.internal_index_vector) {
                    rRHSContribution(internal_index) -= rLHSContribution(internal_index, slave_index) * slave_constant;
                }
                // For RHS(m) += A'*LHS(s,s)*B
                for (auto& slave_index_other : mLocalIndices.slave_index_vector) {
                    auto global_master_slave_constraint_other = mrGlobalMasterSlaveConstraints.find(rEquationIds[slave_index_other]);
                    global_master_slave_constraint_other->second->CalculateLocalSystem(master_weights_vector_other, constant_other);
                    rRHSContribution(master_index) -= rLHSContribution(slave_index, slave_index_other) * master_weight * constant_other;
                }
                // Changing the RHS side of the equation
                rRHSContribution(master_index) += master_weight * rRHSContribution(slave_index);
                i_master++;
            } // Loop over all the masters the slave has
            rRHSContribution(slave_index) = 0.0;
        }
    }
    /**
     * @brief Resets the member vectors and matrices to zero and zero size
     */
    void Reset()
    {
        mLocalIndices.Reset();
        mTransformationMatrixLocal.resize(0,0, false);
        mConstantVectorLocal.resize(0, false);
        mMasterEquationIds.clear();
        mContainerDofs.clear();
    }
    /**
     * @brief This function calculates the local indices of a given element or condition
     * @param rEquationIds vector of the equation ids
     * @param rLocalIndexStructure reference to the structure of LocalIndicesType
     * @param rTotalNumberOfMasters number of master ids appended at the end of rEquationIds
     */
    void CalculateLocalIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure, IndexType rTotalNumberOfMasters)
    {
        CalculateLocalSlaveIndices(rEquationIds, rLocalIndexStructure);
        CalculateLocalInternalIndices(rEquationIds, rLocalIndexStructure);
        CalculateLocalMasterIndices(rEquationIds, rLocalIndexStructure, rTotalNumberOfMasters);
    }
    /**
     * @brief This function calculates the local slave indices of a given element or condition
     * @param rEquationIds vector of the equation ids
     * @param rLocalIndexStructure reference to the structure holding the slave index vector
     */
    void CalculateLocalSlaveIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure)
    {
        KRATOS_TRY
        int index = 0;
        for (auto &eq_id : rEquationIds)
        {
            // A local dof is a slave exactly when its equation id has an entry in the global map.
            auto global_master_slave_constraint = mrGlobalMasterSlaveConstraints.find(eq_id);
            if (global_master_slave_constraint != mrGlobalMasterSlaveConstraints.end())
                rLocalIndexStructure.slave_index_vector.push_back(index);
            index++;
        }
        KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::CalculateLocalSlaveIndices failed ..");
    }
    /**
     * @brief This function calculates the local internal indices of a given element or condition
     * @details internal = all local indices minus the slave indices (set difference).
     * @param rEquationIds vector of the equation ids
     * @param rLocalIndexStructure reference to the structure holding the index vectors
     */
    void CalculateLocalInternalIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure)
    {
        KRATOS_TRY
        VectorIndexType local_index_vector(rEquationIds.size());
        for (IndexType i = 0; i<rEquationIds.size(); ++i)
            local_index_vector[i] = i;
        std::sort(local_index_vector.begin(), local_index_vector.end());
        std::sort(rLocalIndexStructure.slave_index_vector.begin(), rLocalIndexStructure.slave_index_vector.end());
        std::set_difference(local_index_vector.begin(), local_index_vector.end(),
                            rLocalIndexStructure.slave_index_vector.begin(), rLocalIndexStructure.slave_index_vector.end(),
                            std::back_inserter(rLocalIndexStructure.internal_index_vector));
        KRATOS_CATCH("ResidualBasedBlockBuilderAndSolverWithConstraintsElementWise::CalculateLocalInternalIndices failed ..");
    }
    /**
     * @brief This function calculates the local master indices of a given element or condition
     * @details The master ids are appended at the END of rEquationIds by ApplyConstraints, so the
     *          master local indices are the contiguous trailing range
     *          [rEquationIds.size() - rTotalNumberOfMasters, rEquationIds.size()).
     *          BUGFIX: the previous reverse loop (i = size-1; i < size - masters; --i) never executed
     *          for one or more masters, and for zero masters wrongly pushed every local index.
     * @param rEquationIds vector of the equation ids
     * @param rLocalIndexStructure reference to the structure holding the master index vector
     * @param rTotalNumberOfMasters total number of masters for the given element or condition.
     */
    void CalculateLocalMasterIndices(EquationIdVectorType& rEquationIds, LocalIndicesType& rLocalIndexStructure, IndexType rTotalNumberOfMasters)
    {
        // Get number of master indices for this current container
        rLocalIndexStructure.master_index_vector.reserve(rTotalNumberOfMasters);
        for (IndexType i = rEquationIds.size() - rTotalNumberOfMasters; i < rEquationIds.size(); ++i)
            rLocalIndexStructure.master_index_vector.push_back(i);
    }
    ///@}
};
} // namespace Internals
} // namespace Kratos
#endif // CONSTRAINT_SLAVE_H_INCLUDED
|
nodal_two_step_v_p_strategy_for_FSI.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: June 2018 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/implicit_solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include "nodal_two_step_v_p_strategy.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI);
/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
/// Node type (default is: Node<3>)
typedef Node<3> NodeType;
/// Geometry type (using with given NodeType)
typedef Geometry<NodeType> GeometryType;
typedef std::size_t SizeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy;
using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy;
typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object: delegates the whole strategy
/// setup (linear solvers, tolerances, ...) to the base-class initializer.
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
                             SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
    // BaseType is an alias for NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>.
    BaseType::InitializeStrategy(rSolverConfig);
}
/// Full constructor: builds the momentum (velocity) and continuity (pressure)
/// auxiliary linear strategies from the two supplied linear solvers.
/// @param rModelPart model part the strategy operates on
/// @param pVelocityLinearSolver linear solver for the momentum step
/// @param pPressureLinearSolver linear solver for the continuity step
/// @param ReformDofSet whether the DofSet is rebuilt each step (managed by this strategy, not the auxiliaries)
/// @param VelTol / PresTol convergence tolerances for velocity and pressure
/// @param MaxPressureIterations maximum predictor-corrector iterations
/// @param TimeOrder / DomainSize time-integration order and spatial dimension
NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart,
pVelocityLinearSolver,
pPressureLinearSolver,
ReformDofSet,
VelTol,
PresTol,
MaxPressureIterations,
TimeOrder,
DomainSize)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
// NOTE(review): this local typedef SHADOWS the class-level BaseType alias
// (NodalTwoStepVPStrategy); from here on, BaseType refers to ImplicitSolvingStrategy.
typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
// A simple static incremental-update scheme is enough: the Gauss-Seidel linear
// strategies below only assemble and solve; time integration lives in the elements.
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
// Continuity (pressure) strategy: same scheme, dedicated FSI continuity builder.
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor. Empty: the momentum/pressure strategy pointers are owned and
/// released by the base class.
virtual ~NodalTwoStepVPStrategyForFSI() {}
/**
 * @brief Solves one coupled velocity-pressure time step with a nonlinear
 *        predictor-corrector loop (nodally integrated formulation for FSI).
 * @details The allowed iteration count is enlarged for the first 20 steps and
 *          after a time-step change. Each iteration: nodal strains/stresses ->
 *          momentum solve -> topology update -> nodal volumes -> continuity solve.
 * @return true if both momentum and continuity converged within the iteration budget.
 */
bool SolveSolutionStep() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
bool converged = false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl;
// Allow extra iterations right after a time-step change (solution is harder to converge).
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
// During the first 10 steps triple, during the next 10 double, the iteration budget.
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
// bool momentumAlreadyConverged=false;
// bool continuityAlreadyConverged=false;
/* boost::timer solve_step_time; */
// std::cout<<" InitializeSolutionStep().... "<<std::endl;
InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
// First iteration only: nodal volumes and shape-function-derivative neighbours.
if (it == 0)
{
ComputeNodalVolumeAndAssignFlagToElementType(); // it assings NODAL_VOLUME to fluid and SOLID_NODAL_VOLUME to solid. Interface nodes have both
this->InitializeNonLinearIterations(); // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids
}
// std::cout<<" CalcNodalStrainsAndStresses .... "<<std::endl;
CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes
// std::cout<<" CalcNodalStrainsAndStresses DONE "<<std::endl;
// Momentum (velocity) solve, then re-mesh/topology update and recompute nodal data.
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
UpdateTopology(rModelPart, BaseType::GetEchoLevel());
// std::cout<<" ComputeNodalVolume .... "<<std::endl;
ComputeNodalVolume();
// std::cout<<" ComputeNodalVolume DONE "<<std::endl;
this->InitializeNonLinearIterations();
// std::cout<<" InitializeNonLinearIterations DONE "<<std::endl;
CalcNodalStrains();
// std::cout<<" CalcNodalStrains DONE "<<std::endl;
// Continuity (pressure) solve, skipped if the momentum step flagged a fixed time step.
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
// if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// momentumAlreadyConverged=true;
// }
// if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// continuityAlreadyConverged=true;
// }
// On the last iteration, or once both fields converged (after at least 2 iterations),
// update the accelerations for the next time step.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
{
//this->ComputeErrorL2NormCaseImposedG();
//this->ComputeErrorL2NormCasePoiseuille();
this->CalculateAccelerations();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
// Hybrid (nodal + elemental) stress update is compiled in but disabled by this flag.
bool hybridMethod = false;
if (hybridMethod == true)
{
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 0))
{
this->UpdateElementalStressStrain();
}
}
if ((continuityConverged && momentumConverged) && it > 1)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
return converged;
}
void UpdateElementalStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
}
void Initialize() override
{
std::cout << " \n Initialize in nodal_two_step_v_p_strategy_FSI" << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size();
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
{
Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
if (rNodalSFDneighbours.size() != sizeSDFNeigh)
{
rNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
{
Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
if (rSpatialDefRate.size() != sizeStrains)
{
rSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
{
Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
if (rFgrad.size1() != dimension)
{
rFgrad.resize(dimension, dimension, false);
}
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (rFgradVel.size1() != dimension)
{
rFgradVel.resize(dimension, dimension, false);
}
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rSolidNodalStress.size() != sizeStrains)
{
rSolidNodalStress.resize(sizeStrains, false);
}
noalias(rSolidNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
{
Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
{
rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
{
Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
if (rSolidSpatialDefRate.size() != sizeStrains)
{
rSolidSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
{
Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
if (rSolidFgrad.size1() != dimension)
{
rSolidFgrad.resize(dimension, dimension, false);
}
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (rSolidFgradVel.size1() != dimension)
{
rSolidFgradVel.resize(dimension, dimension, false);
}
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
AssignMaterialToEachNode(itNode);
}
// }
}
// Assigns the time-scaled material coefficients of a node from its flags:
// solids get dt-scaled Lame parameters from Young's modulus / Poisson ratio,
// fluids and rigid nodes get dynamic viscosity and dt-scaled bulk modulus.
// Nodes flagged both SOLID and RIGID are marked as interface nodes.
// Results are stored in VOLUMETRIC_COEFFICIENT (= dt * first Lame parameter)
// and DEVIATORIC_COEFFICIENT.
void AssignMaterialToEachNode(ModelPart::NodeIterator itNode)
{
    ModelPart &r_model_part = BaseType::GetModelPart();
    const ProcessInfo &r_process_info = r_model_part.GetProcessInfo();
    const double dt = r_process_info[DELTA_TIME];

    double deviatoric_coeff = 0; // dt * second Lame parameter (or viscosity for fluids)
    double volumetric_coeff = 0; // dt * bulk modulus

    if (itNode->Is(SOLID))
    {
        const double young_modulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
        const double poisson_ratio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
        // deviatoric_coeff = dt * mu, with mu = E / (2 (1 + nu))
        deviatoric_coeff = dt * young_modulus / (1.0 + poisson_ratio) * 0.5;
        // volumetric_coeff = dt * bulk = dt * (lambda + 2 mu / 3)
        volumetric_coeff = dt * poisson_ratio * young_modulus / ((1.0 + poisson_ratio) * (1.0 - 2.0 * poisson_ratio)) + 2.0 * deviatoric_coeff / 3.0;
    }
    else if (itNode->Is(FLUID) || itNode->Is(RIGID))
    {
        deviatoric_coeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
        volumetric_coeff = dt * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    }

    // A node carrying both the SOLID and RIGID flags lies on the interface.
    const bool is_interface_node = itNode->Is(SOLID) && itNode->Is(RIGID);
    itNode->FastGetSolutionStepValue(INTERFACE_NODE) = is_interface_node;

    // dt * first Lame parameter: lambda * dt = bulk * dt - (2/3) * mu * dt
    const double first_lame_dt = volumetric_coeff - 2.0 * deviatoric_coeff / 3.0;
    itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = first_lame_dt;
    itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoric_coeff;
}
// Accumulates each element's measure share into its nodes: 1/3 of the area per
// node in 2D (triangles), 1/4 of the volume per node in 3D (tetrahedra).
// Contributions of SOLID elements go to SOLID_NODAL_VOLUME instead of
// NODAL_VOLUME (the fluid add is undone for them).
//
// BUGFIX: the original iterated only over the element partition of
// OpenMPUtils::ThisThread() although the surrounding "#pragma omp parallel"
// region had been commented out. Outside a parallel region ThisThread() is 0,
// so whenever omp_get_max_threads() > 1 only the first partition of elements
// was processed and the remaining elements contributed no nodal volume. The
// loop now covers the whole element container; parallelization can be
// reintroduced later with a proper parallel region. //MSI: To be parallelized
void ComputeNodalVolume()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ElementsArrayType &pElements = rModelPart.Elements();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    typename ElementsArrayType::iterator ElemBegin = pElements.begin();
    typename ElementsArrayType::iterator ElemEnd = pElements.end();
    for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
    {
        Element::GeometryType &geometry = itElem->GetGeometry();
        // Per-node share of the element measure.
        double elementalVolume = 0;
        if (dimension == 2)
        {
            elementalVolume = geometry.Area() / 3.0;
        }
        else if (dimension == 3)
        {
            elementalVolume = geometry.Volume() * 0.25;
        }
        const unsigned int numNodes = geometry.size();
        for (unsigned int i = 0; i < numNodes; i++)
        {
            double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
            nodalVolume += elementalVolume;
            if (itElem->Is(SOLID))
            {
                // Solid contributions are tracked separately; undo the fluid add.
                double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
                solidVolume += elementalVolume;
                nodalVolume -= elementalVolume;
            }
        }
    }
}
// Same nodal-volume accumulation as ComputeNodalVolume, and in the same loop
// classifies every element from the flags of its nodes:
//  - all nodes SOLID                  -> element Set(SOLID)
//  - all nodes INTERFACE_NODE == true -> element Set(SOLID)
//  - all nodes "fluid" (see below)    -> element Set(FLUID)
//  - all nodes both solid and fluid   -> FLUID flag is reset (element stays solid)
void ComputeNodalVolumeAndAssignFlagToElementType()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ElementsArrayType &pElements = rModelPart.Elements();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> element_partition;
    OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition);
    // #pragma omp parallel
    // {
    // NOTE(review): with the parallel region commented out, ThisThread() is 0,
    // so only element_partition[0..1] is traversed; if omp_get_max_threads()>1
    // part of the elements is skipped — confirm whether this is intended.
    int k = OpenMPUtils::ThisThread();
    typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k];
    typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1];
    for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized
    {
        Element::GeometryType &geometry = itElem->GetGeometry();
        // Per-node share of the element measure: area/3 (triangle) or volume/4 (tetrahedron).
        double elementalVolume = 0;
        if (dimension == 2)
        {
            elementalVolume = geometry.Area() / 3.0;
        }
        else if (dimension == 3)
        {
            elementalVolume = geometry.Volume() * 0.25;
        }
        // index = 0;
        unsigned int numNodes = geometry.size();
        // Count how many of the element's nodes are fluid / solid / interface.
        unsigned int fluidNodes = 0;
        unsigned int solidNodes = 0;
        unsigned int interfaceNodes = 0;
        for (unsigned int i = 0; i < numNodes; i++)
        {
            // "Fluid" here means: fluid-and-not-solid, or a fluid interface node.
            if ((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true))
            {
                fluidNodes += 1;
            }
            if (geometry(i)->Is(SOLID))
            {
                solidNodes += 1;
            }
            if (geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                interfaceNodes += 1;
            }
        }
        if (solidNodes == numNodes)
        {
            itElem->Set(SOLID);
            // std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        // Elements made entirely of interface nodes are treated as solid.
        if (interfaceNodes == numNodes)
        {
            itElem->Set(SOLID);
            // std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        if (fluidNodes == numNodes)
        {
            itElem->Set(FLUID);
            // std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        // Ambiguous elements (all nodes both solid and fluid) are kept solid only.
        if (solidNodes == numNodes && fluidNodes == numNodes)
        {
            itElem->Reset(FLUID);
            // std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl;
        }
        // Volume accumulation: solid elements contribute to SOLID_NODAL_VOLUME
        // (the NODAL_VOLUME add is undone for them), others to NODAL_VOLUME.
        for (unsigned int i = 0; i < numNodes; i++)
        {
            double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
            nodalVolume += elementalVolume;
            if (itElem->Is(SOLID))
            {
                double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
                solidVolume += elementalVolume;
                nodalVolume += -elementalVolume;
                // if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){
                //   //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before
                //   nodalVolume += -elementalVolume;
                // }
                // if(interfaceNodes==numNodes && solidDensity==0){
                //   std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl;
                //   double density=geometry(i)->FastGetSolutionStepValue(DENSITY);
                //   geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density;
                // }
            }
        }
    }
    // }
}
// Strategy hook executed at the beginning of each solution step: rebuilds the
// nodal variables and the shape-function-derivative neighbour orderings.
void InitializeSolutionStep() override
{
    FillNodalSFDVector();
}
void FillNodalSFDVector()
{
// std::cout << "FillNodalSFDVector(); ... " << std::endl;
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
// ModelPart::NodeIterator NodesBegin;
// ModelPart::NodeIterator NodesEnd;
// OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
// for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
// {
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
{
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == false)
{
this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
if (itNode->Is(SOLID))
{
SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER
}
}
else
{
SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids
}
}
// }
// std::cout << "FillNodalSFDVector(); DONE " << std::endl;
}
// Fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER with the node ids of the stencil:
// entry 0 is the node itself (it counts as its own neighbour), followed by
// the ids of all its NEIGHBOUR_NODES.
void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode)
{
    NodeWeakPtrVectorType &r_neighbours = itNode->GetValue(NEIGHBOUR_NODES);
    const unsigned int n_entries = r_neighbours.size() + 1;
    Vector &r_ordered_ids = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    if (r_ordered_ids.size() != n_entries)
    {
        r_ordered_ids.resize(n_entries, false);
    }
    noalias(r_ordered_ids) = ZeroVector(n_entries);
    r_ordered_ids[0] = itNode->Id();
    // Remaining entries: ids of the neighbour nodes, shifted by one slot.
    for (unsigned int pos = 1; pos < n_entries; ++pos)
    {
        r_ordered_ids[pos] = r_neighbours[pos - 1].Id();
    }
}
// For an interface node, builds two separate neighbour-id orderings:
//  - NODAL_SFD_NEIGHBOURS_ORDER       (fluid side: non-solid or interface neighbours)
//  - SOLID_NODAL_SFD_NEIGHBOURS_ORDER (solid side: solid neighbours)
// Entry 0 of both vectors is the node's own id. Two passes are made over the
// neighbour list: one to size the vectors, one to fill them; the classifying
// conditions in both passes must stay identical.
void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode)
{
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1: the node itself is part of the stencil
    // Counters start at 1 to account for the node itself (slot 0).
    unsigned int fluidCounter = 1;
    unsigned int solidCounter = 1;
    // First pass: count fluid-side and solid-side neighbours.
    // A neighbour can be counted on both sides (solid interface nodes).
    if (neighbourNodes > 1)
    {
        for (unsigned int k = 0; k < neighbourNodes - 1; k++)
        {
            if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                fluidCounter += 1;
            }
            if (neighb_nodes[k].Is(SOLID))
            {
                solidCounter += 1;
            }
        }
    }
    Vector &rFluidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    Vector &rSolidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    if (rFluidNodeOrderedNeighbours.size() != fluidCounter)
        rFluidNodeOrderedNeighbours.resize(fluidCounter, false);
    if (rSolidNodeOrderedNeighbours.size() != solidCounter)
        rSolidNodeOrderedNeighbours.resize(solidCounter, false);
    noalias(rFluidNodeOrderedNeighbours) = ZeroVector(fluidCounter);
    noalias(rSolidNodeOrderedNeighbours) = ZeroVector(solidCounter);
    rFluidNodeOrderedNeighbours[0] = itNode->Id();
    rSolidNodeOrderedNeighbours[0] = itNode->Id();
    // Second pass: fill slots 1..count using pre-incremented counters as indices.
    fluidCounter = 0;
    solidCounter = 0;
    if (neighbourNodes > 1)
    {
        for (unsigned int k = 0; k < neighbourNodes - 1; k++)
        {
            if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                fluidCounter += 1;
                rFluidNodeOrderedNeighbours[fluidCounter] = neighb_nodes[k].Id();
            }
            if (neighb_nodes[k].Is(SOLID))
            {
                solidCounter += 1;
                rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id();
            }
        }
    }
    // fluidCounter+=1;
    // solidCounter+=1;
    // ModelPart& rModelPart = BaseType::GetModelPart();
    // const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // const unsigned int sizeFluidSDFNeigh=fluidCounter*dimension;
    // const unsigned int sizeSolidSDFNeigh=solidCounter*dimension;
    // Vector& rFluidNodalSFDneighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    // Vector& rSolidNodalSFDneighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    // if(rFluidNodalSFDneighbours.size() != sizeFluidSDFNeigh)
    //   rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,false);
    // if(rSolidNodalSFDneighbours.size() != sizeSolidSDFNeigh)
    //   rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,false);
    // noalias(rFluidNodalSFDneighbours)=ZeroVector(sizeFluidSDFNeigh);
    // noalias(rSolidNodalSFDneighbours)=ZeroVector(sizeSolidSDFNeigh);
    // rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,true);
    // rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,true);
    // std::cout<<"rFluidNodeOrderedNeighbours "<<rFluidNodeOrderedNeighbours<<std::endl;
    // std::cout<<"rSolidNodeOrderedNeighbours "<<rSolidNodeOrderedNeighbours<<std::endl;
    // std::cout<<"rFluidNodalSFDneighbours "<<rFluidNodalSFDneighbours<<std::endl;
    // std::cout<<"rSolidNodalSFDneighbours "<<rSolidNodalSFDneighbours<<std::endl;
}
// Resets every SOLID_* nodal history variable of a node after remeshing.
// Vectors are sized to the Voigt strain size (3 in 2D, 6 in 3D) or to the
// shape-function-derivative storage size ((neighbours + 1) * dimension);
// matrices are sized dimension x dimension. Everything is zeroed afterwards.
// A variable that is not stored on the node is silently skipped.
void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode)
{
    ModelPart &r_model_part = BaseType::GetModelPart();
    const unsigned int dim = r_model_part.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    const unsigned int strain_size = 3 * (dim - 1);
    NodeWeakPtrVectorType &r_neighbours = itNode->GetValue(NEIGHBOUR_NODES);
    const unsigned int n_neighbour_entries = r_neighbours.size() + 1; // the node itself counts too
    const unsigned int sfd_size = n_neighbour_entries * dim;

    if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS))
    {
        Vector &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
        if (r_value.size() != strain_size)
        {
            r_value.resize(strain_size, false);
        }
        noalias(r_value) = ZeroVector(strain_size);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS))
    {
        Vector &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
        if (r_value.size() != strain_size)
        {
            r_value.resize(strain_size, false);
        }
        noalias(r_value) = ZeroVector(strain_size);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
    {
        Vector &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
        if (r_value.size() != sfd_size)
        {
            r_value.resize(sfd_size, false);
        }
        noalias(r_value) = ZeroVector(sfd_size);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER))
    {
        Vector &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
        if (r_value.size() != n_neighbour_entries)
        {
            r_value.resize(n_neighbour_entries, false);
        }
        noalias(r_value) = ZeroVector(n_neighbour_entries);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
    {
        Vector &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        if (r_value.size() != strain_size)
        {
            r_value.resize(strain_size, false);
        }
        noalias(r_value) = ZeroVector(strain_size);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
    {
        Matrix &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
        if (r_value.size1() != dim)
        {
            r_value.resize(dim, dim, false);
        }
        noalias(r_value) = ZeroMatrix(dim, dim);
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
    {
        Matrix &r_value = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
        if (r_value.size1() != dim)
        {
            r_value.resize(dim, dim, false);
        }
        noalias(r_value) = ZeroMatrix(dim, dim);
    }
    // Scalar solid variables are simply zeroed.
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
    }
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
    }
}
// Computes nodal deformation gradients, strain rates and Cauchy stresses for
// all nodes, dispatching on node type:
//  - interface nodes: processed once for the fluid side and once for the
//    solid side (theta = 0.5 for fluid, 1.0 for solid);
//  - inner solid nodes: solid-only path with theta = 1.0;
//  - other nodes with positive volume: fluid path with theta = 0.5;
//  - nodes with zero fluid AND solid volume: variables are re-initialized.
void CalcNodalStrainsAndStresses()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // #pragma omp parallel
    // {
    // NOTE(review): PartitionedIterators yields the current thread's partition;
    // with the parallel region commented out only thread 0's share of nodes may
    // be visited — confirm this covers the whole node container as intended.
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
        const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
        // theta: time-integration weight passed to the deformation-gradient update.
        double theta = 0.5;
        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
        {
            // Fluid side of the interface node.
            if (nodalVolume > 0)
            {
                Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
                Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
                Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
                if (interfaceFgrad.size1() != dimension)
                    interfaceFgrad.resize(dimension, dimension, false);
                if (interfaceFgradVel.size1() != dimension)
                    interfaceFgradVel.resize(dimension, dimension, false);
                noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
                noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
                //I have to compute the stresses and strains two times because one time is for the solid and the other for the fluid
                // Matrix interfaceFgrad=ZeroMatrix(dimension,dimension);
                // Matrix interfaceFgradVel=ZeroMatrix(dimension,dimension);
                //the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes.
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
                CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode);
            }
            // Solid side of the interface node (fully implicit: theta = 1.0).
            if (solidNodalVolume > 0)
            {
                Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
                Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
                Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
                if (solidInterfaceFgrad.size1() != dimension)
                    solidInterfaceFgrad.resize(dimension, dimension, false);
                if (solidInterfaceFgradVel.size1() != dimension)
                    solidInterfaceFgradVel.resize(dimension, dimension, false);
                noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
                noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
                theta = 1.0;
                // Matrix solidInterfaceFgrad=ZeroMatrix(dimension,dimension);
                // Matrix solidInterfaceFgradVel=ZeroMatrix(dimension,dimension);
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
                CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode);
            }
        }
        else
        {
            if (itNode->Is(SOLID) && solidNodalVolume > 0)
            {
                theta = 1.0;
                ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
                CalcNodalStrainsAndStressesForSolidNode(itNode);
            }
            else if (nodalVolume > 0)
            {
                theta = 0.5;
                this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
                this->CalcNodalStrainsAndStressesForNode(itNode);
            }
        }
        // Isolated node with no surrounding volume: reset its variables.
        if (nodalVolume == 0 && solidNodalVolume == 0)
        { // if nodalVolume==0
            theta = 0.5;
            this->InitializeNodalVariablesForRemeshedDomain(itNode);
            InitializeNodalVariablesForSolidRemeshedDomain(itNode);
        }
        // }
        // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
        //   CopyValuesToSolidNonInterfaceNodes(itNode);
        // }
    }
    // }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
// Copies a node's fluid-side ("NODAL_*") strain/stress state into the
// corresponding SOLID_* variables, resizing the solid-side vectors to the
// fluid-side sizes first. In this chunk the only caller visible is a
// commented-out line in CalcNodalStrainsAndStresses.
void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode)
{
    Vector &solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    Vector &solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    Vector &solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
    double &volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE);
    Vector &solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
    Vector &solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
    // Pre-size the solid-side vectors to the fluid-side sizes before assigning.
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size();
    solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false);
    Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    unsigned int sizeNodalSFDneigh = nodalSFDneigh.size();
    solidNodalSFDneigh.resize(sizeNodalSFDneigh, false);
    // Copy the fluid state into the solid variables.
    solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
    volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE);
    solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS);
    solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
}
// Computes the fluid-side spatial deformation rate, volumetric deformation
// rate, and total/deviatoric Cauchy stress (Voigt notation) of an interface
// fluid node from its NODAL_DEFORMATION_GRAD / NODAL_DEFORMATION_GRAD_VEL.
// For Bingham-like fluids (YIELD_SHEAR > 0) the viscosity is regularized via
// the Papanastasiou model using ADAPTIVE_EXPONENT.
void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
    const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
    // First Papanastasiou correction, using the strain rate from the previous update.
    if (yieldShear > 0)
    {
        const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
        const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
        const double exponent = -adaptiveExponent * equivalentStrainRate;
        if (equivalentStrainRate != 0)
        {
            deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
        }
        if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
        {
            // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
            deviatoricCoeff = adaptiveExponent * yieldShear;
        }
    }
    // dt-scaled bulk modulus plays the role of the first Lame coefficient here.
    const double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Symmetric part of L in Voigt notation: [d_xx, d_yy, d_xy].
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        // NOTE(review): the Papanastasiou correction was already added to
        // deviatoricCoeff above using the stale equivalent strain rate; here it
        // is added again (+=) with the freshly computed one, so the yield term
        // appears to be applied twice — confirm this is intended.
        if (yieldShear > 0)
        {
            // Equivalent strain rate: sqrt(2 d:d) with doubled shear components.
            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                   2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                   4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        // Volumetric deformation rate = trace of the deformation-rate tensor.
        const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        // Total stress sigma = lambda_dt * tr(d) * I + 2 mu d; deviatoric part removes tr(d)/3.
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        // if(itNode->Is(SOLID))
        // {
        //   nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
        //   nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
        //   nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
        //   nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
        //   nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
        //   nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
        // }
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // Voigt ordering in 3D: [d_xx, d_yy, d_zz, d_xy, d_xz, d_yz].
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        // NOTE(review): second Papanastasiou correction on top of the one applied
        // before the gradient update — see the 2D branch note; confirm intended.
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                                                                                  2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                                                                                  2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                                                                                  4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                                                                                  4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                                                                                  4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        // Volumetric deformation rate = trace of the deformation-rate tensor.
        const double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
        // if(itNode->Is(SOLID))
        // {
        //   nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0];
        //   nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1];
        //   nodalSigmaTot_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2];
        //   nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[3];
        //   nodalSigmaTot_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[4];
        //   nodalSigmaTot_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[5];
        //   nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0];
        //   nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1];
        //   nodalSigmaDev_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2];
        //   nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[3];
        //   nodalSigmaDev_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[4];
        //   nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5];
        // }
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
        itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
        itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
    }
}
void CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
    // Computes, for an interface node of the solid sub-domain, the nodal spatial
    // deformation-rate tensor (Voigt notation) and the total and deviatoric
    // Cauchy stresses, starting from the nodal deformation gradient F and its
    // time derivative dF/dt previously stored on the node.
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
    // Time-scaled Lame coefficients (hypoelastic rate form: stress increment = dt * C : D).
    const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Cache the nodal database entry once instead of re-querying it for every
        // component access; the reference aliases the stored nodal vector.
        auto &r_strain_rate = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate gamma_dot = sqrt(2 D:D); index 2 is the shear component.
            const double equivalentStrainRate = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                     2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                     4.0 * r_strain_rate[2] * r_strain_rate[2]);
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = equivalentStrainRate;
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                // Papanastasiou-regularized yield-stress contribution to the apparent viscosity.
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = r_strain_rate[0] + r_strain_rate[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * r_strain_rate[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (r_strain_rate[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (r_strain_rate[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * r_strain_rate[2];
        if (itNode->Is(SOLID))
        {
            // Hypoelastic update: accumulate the increment on the previous-step stresses.
            const auto &r_old_stress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1);
            const auto &r_old_dev_stress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1);
            nodalSigmaTot_xx += r_old_stress[0];
            nodalSigmaTot_yy += r_old_stress[1];
            nodalSigmaTot_xy += r_old_stress[2];
            nodalSigmaDev_xx += r_old_dev_stress[0];
            nodalSigmaDev_yy += r_old_dev_stress[1];
            nodalSigmaDev_xy += r_old_dev_stress[2];
        }
        auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor2D[0] = nodalSigmaTot_xx;
        r_stress_tensor2D[1] = nodalSigmaTot_yy;
        r_stress_tensor2D[2] = nodalSigmaTot_xy;
        auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        auto &r_strain_rate = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = SpatialVelocityGrad(2, 2);
        r_strain_rate[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_strain_rate[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_strain_rate[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate gamma_dot = sqrt(2 D:D); indices 3-5 are the shear components.
            const double equivalentStrainRate = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                     2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                     2.0 * r_strain_rate[2] * r_strain_rate[2] +
                                                     4.0 * r_strain_rate[3] * r_strain_rate[3] +
                                                     4.0 * r_strain_rate[4] * r_strain_rate[4] +
                                                     4.0 * r_strain_rate[5] * r_strain_rate[5]);
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = equivalentStrainRate;
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                // Papanastasiou-regularized yield-stress contribution to the apparent viscosity.
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = r_strain_rate[0] + r_strain_rate[1] + r_strain_rate[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * r_strain_rate[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * r_strain_rate[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * r_strain_rate[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (r_strain_rate[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (r_strain_rate[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (r_strain_rate[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * r_strain_rate[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * r_strain_rate[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * r_strain_rate[5];
        if (itNode->Is(SOLID))
        {
            // Hypoelastic update: accumulate the increment on the previous-step stresses.
            const auto &r_old_stress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1);
            const auto &r_old_dev_stress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1);
            nodalSigmaTot_xx += r_old_stress[0];
            nodalSigmaTot_yy += r_old_stress[1];
            nodalSigmaTot_zz += r_old_stress[2];
            nodalSigmaTot_xy += r_old_stress[3];
            nodalSigmaTot_xz += r_old_stress[4];
            nodalSigmaTot_yz += r_old_stress[5];
            nodalSigmaDev_xx += r_old_dev_stress[0];
            nodalSigmaDev_yy += r_old_dev_stress[1];
            nodalSigmaDev_zz += r_old_dev_stress[2];
            nodalSigmaDev_xy += r_old_dev_stress[3];
            nodalSigmaDev_xz += r_old_dev_stress[4];
            nodalSigmaDev_yz += r_old_dev_stress[5];
        }
        auto &r_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor3D[0] = nodalSigmaTot_xx;
        r_stress_tensor3D[1] = nodalSigmaTot_yy;
        r_stress_tensor3D[2] = nodalSigmaTot_zz;
        r_stress_tensor3D[3] = nodalSigmaTot_xy;
        r_stress_tensor3D[4] = nodalSigmaTot_xz;
        r_stress_tensor3D[5] = nodalSigmaTot_yz;
        auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
        r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
        r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
        r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
    }
}
void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode)
{
    // Computes, for a (non-interface) solid node, the nodal spatial
    // deformation-rate tensor (Voigt notation) and the total and deviatoric
    // Cauchy stresses, starting from the nodal deformation gradient F and its
    // time derivative dF/dt previously stored on the node.
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    const double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    const double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
    // Time-scaled Lame coefficients (hypoelastic rate form: stress increment = dt * C : D).
    const double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Cache the nodal database entry once instead of re-querying it for every
        // component access; the reference aliases the stored nodal vector.
        auto &r_strain_rate = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate gamma_dot = sqrt(2 D:D); index 2 is the shear component.
            const double equivalentStrainRate = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                     2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                     4.0 * r_strain_rate[2] * r_strain_rate[2]);
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = equivalentStrainRate;
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                // Papanastasiou-regularized yield-stress contribution to the apparent viscosity.
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = r_strain_rate[0] + r_strain_rate[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * r_strain_rate[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (r_strain_rate[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (r_strain_rate[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * r_strain_rate[2];
        if (itNode->Is(SOLID))
        {
            // Hypoelastic update: accumulate the increment on the previous-step stresses.
            const auto &r_old_stress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1);
            const auto &r_old_dev_stress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1);
            nodalSigmaTot_xx += r_old_stress[0];
            nodalSigmaTot_yy += r_old_stress[1];
            nodalSigmaTot_xy += r_old_stress[2];
            nodalSigmaDev_xx += r_old_dev_stress[0];
            nodalSigmaDev_yy += r_old_dev_stress[1];
            nodalSigmaDev_xy += r_old_dev_stress[2];
        }
        auto &r_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_stress_tensor2D[0] = nodalSigmaTot_xx;
        r_stress_tensor2D[1] = nodalSigmaTot_yy;
        r_stress_tensor2D[2] = nodalSigmaTot_xy;
        auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
        r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
        r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        auto &r_strain_rate = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = SpatialVelocityGrad(2, 2);
        r_strain_rate[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_strain_rate[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_strain_rate[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        const double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate gamma_dot = sqrt(2 D:D); indices 3-5 are the shear components.
            const double equivalentStrainRate = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                     2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                     2.0 * r_strain_rate[2] * r_strain_rate[2] +
                                                     4.0 * r_strain_rate[3] * r_strain_rate[3] +
                                                     4.0 * r_strain_rate[4] * r_strain_rate[4] +
                                                     4.0 * r_strain_rate[5] * r_strain_rate[5]);
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = equivalentStrainRate;
            const double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            const double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                // Papanastasiou-regularized yield-stress contribution to the apparent viscosity.
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        const double DefVol = r_strain_rate[0] + r_strain_rate[1] + r_strain_rate[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * r_strain_rate[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * r_strain_rate[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * r_strain_rate[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * r_strain_rate[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (r_strain_rate[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (r_strain_rate[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (r_strain_rate[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * r_strain_rate[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * r_strain_rate[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * r_strain_rate[5];
        if (itNode->Is(SOLID))
        {
            // Hypoelastic update: accumulate the increment on the previous-step stresses.
            const auto &r_old_stress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1);
            const auto &r_old_dev_stress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1);
            nodalSigmaTot_xx += r_old_stress[0];
            nodalSigmaTot_yy += r_old_stress[1];
            nodalSigmaTot_zz += r_old_stress[2];
            nodalSigmaTot_xy += r_old_stress[3];
            nodalSigmaTot_xz += r_old_stress[4];
            nodalSigmaTot_yz += r_old_stress[5];
            nodalSigmaDev_xx += r_old_dev_stress[0];
            nodalSigmaDev_yy += r_old_dev_stress[1];
            nodalSigmaDev_zz += r_old_dev_stress[2];
            nodalSigmaDev_xy += r_old_dev_stress[3];
            nodalSigmaDev_xz += r_old_dev_stress[4];
            nodalSigmaDev_yz += r_old_dev_stress[5];
        }
        auto &r_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0);
        r_tensor3D[0] = nodalSigmaTot_xx;
        r_tensor3D[1] = nodalSigmaTot_yy;
        r_tensor3D[2] = nodalSigmaTot_zz;
        r_tensor3D[3] = nodalSigmaTot_xy;
        r_tensor3D[4] = nodalSigmaTot_xz;
        r_tensor3D[5] = nodalSigmaTot_yz;
        auto &r_dev_tensor3D = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
        r_dev_tensor3D[0] = nodalSigmaDev_xx;
        r_dev_tensor3D[1] = nodalSigmaDev_yy;
        r_dev_tensor3D[2] = nodalSigmaDev_zz;
        r_dev_tensor3D[3] = nodalSigmaDev_xy;
        r_dev_tensor3D[4] = nodalSigmaDev_xz;
        r_dev_tensor3D[5] = nodalSigmaDev_yz;
    }
}
void CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
    // Computes the nodal spatial deformation-rate tensor (Voigt notation), the
    // equivalent strain rate and the volumetric deformation rate for a solid
    // node, from the stored nodal deformation gradient and its time derivative.
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    double detFgrad = 1.0;
    // Initialize directly from the nodal database: the original code first
    // zero-filled these matrices and then immediately overwrote them.
    Matrix nodalFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    // Inverse of the deformation gradient F
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Cache the nodal vector once; the reference aliases the stored data.
        auto &r_strain_rate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // Equivalent strain rate gamma_dot = sqrt(2 D:D); index 2 is the shear component.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                                                    2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                                                    4.0 * r_strain_rate[2] * r_strain_rate[2]);
        // Volumetric deformation rate = trace of D.
        const double DefVol = r_strain_rate[0] + r_strain_rate[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    else if (dimension == 3)
    {
        auto &r_strain_rate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = SpatialVelocityGrad(2, 2);
        r_strain_rate[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_strain_rate[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_strain_rate[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        // Equivalent strain rate gamma_dot = sqrt(2 D:D); indices 3-5 are the shear components.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                                                    2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                                                    2.0 * r_strain_rate[2] * r_strain_rate[2] +
                                                                                    4.0 * r_strain_rate[3] * r_strain_rate[3] +
                                                                                    4.0 * r_strain_rate[4] * r_strain_rate[4] +
                                                                                    4.0 * r_strain_rate[5] * r_strain_rate[5]);
        // Volumetric deformation rate = trace of D.
        const double DefVol = r_strain_rate[0] + r_strain_rate[1] + r_strain_rate[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
}
void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
    // Computes the nodal spatial deformation-rate tensor (Voigt notation), the
    // equivalent strain rate and the volumetric deformation rate for an
    // interface node of the solid sub-domain, from the stored nodal deformation
    // gradient and its time derivative.
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    // Inverse of the deformation gradient F
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Cache the nodal vector once; the reference aliases the stored data.
        auto &r_strain_rate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // Equivalent strain rate gamma_dot = sqrt(2 D:D); index 2 is the shear component.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                                                    2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                                                    4.0 * r_strain_rate[2] * r_strain_rate[2]);
        // Volumetric deformation rate = trace of D.
        const double DefVol = r_strain_rate[0] + r_strain_rate[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    else if (dimension == 3)
    {
        auto &r_strain_rate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        r_strain_rate[0] = SpatialVelocityGrad(0, 0);
        r_strain_rate[1] = SpatialVelocityGrad(1, 1);
        r_strain_rate[2] = SpatialVelocityGrad(2, 2);
        r_strain_rate[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        r_strain_rate[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        r_strain_rate[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        // Equivalent strain rate gamma_dot = sqrt(2 D:D); indices 3-5 are the shear components.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * r_strain_rate[0] * r_strain_rate[0] +
                                                                                    2.0 * r_strain_rate[1] * r_strain_rate[1] +
                                                                                    2.0 * r_strain_rate[2] * r_strain_rate[2] +
                                                                                    4.0 * r_strain_rate[3] * r_strain_rate[3] +
                                                                                    4.0 * r_strain_rate[4] * r_strain_rate[4] +
                                                                                    4.0 * r_strain_rate[5] * r_strain_rate[5]);
        // Volumetric deformation rate = trace of D.
        const double DefVol = r_strain_rate[0] + r_strain_rate[1] + r_strain_rate[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
}
void CalcNodalStrains()
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double theta = 1.0;
if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
if (nodalVolume > 0)
{
//I have to compute the strains two times because one time is for the solid and the other for the fluid
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (interfaceFgrad.size1() != dimension)
interfaceFgrad.resize(dimension, dimension, false);
if (interfaceFgradVel.size1() != dimension)
interfaceFgradVel.resize(dimension, dimension, false);
noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
// Matrix interfaceFgrad = ZeroMatrix(dimension,dimension);
// Matrix interfaceFgradVel = ZeroMatrix(dimension,dimension);
//the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes.
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
// itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
this->CalcNodalStrainsForNode(itNode);
}
if (solidNodalVolume > 0)
{
Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
if (solidInterfaceFgrad.size1() != dimension)
solidInterfaceFgrad.resize(dimension, dimension, false);
if (solidInterfaceFgradVel.size1() != dimension)
solidInterfaceFgradVel.resize(dimension, dimension, false);
noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
// Matrix solidInterfaceFgrad = ZeroMatrix(dimension,dimension);
// Matrix solidInterfaceFgradVel = ZeroMatrix(dimension,dimension);
ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
// itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
CalcNodalStrainsForInterfaceSolidNode(itNode);
}
}
else
{
if (itNode->Is(SOLID) && solidNodalVolume > 0)
{
ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
CalcNodalStrainsForSolidNode(itNode);
}
else if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsForNode(itNode);
}
}
if (nodalVolume == 0 && solidNodalVolume == 0)
{ // if nodalVolume==0
this->InitializeNodalVariablesForRemeshedDomain(itNode);
InitializeNodalVariablesForSolidRemeshedDomain(itNode);
}
// if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
// CopyValuesToSolidNonInterfaceNodes(itNode);
// }
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
// Computes and stores the nodal deformation gradient F (SOLID_NODAL_DEFORMATION_GRAD)
// and its rate Fdot (SOLID_NODAL_DEFORMATION_GRAD_VEL) for a solid node.
// F(a,b) += dN/dX_b * x_a, accumulated over the node itself and all of its
// neighbours, using the shape-function derivatives packed in
// SOLID_NODAL_SFD_NEIGHBOURS (dimension entries per node, consecutively).
// Fdot uses the theta-blended velocity v = theta*v^{n+1} + (1-theta)*v^{n}.
// NOTE(review): this variant assumes NEIGHBOUR_NODES is ordered exactly like
// SOLID_NODAL_SFD_NEIGHBOURS_ORDER; a mismatch is only reported to stdout,
// not repaired (compare the interface-node variant, which searches by id).
void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// Neighbour ids (first entry is this node itself) and the packed
// shape-function derivatives of the solid neighbourhood.
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
Matrix Fgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
if (dimension == 2)
{
// --- 2D case: contribution of the node itself. ---
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
// theta-blended velocity of the node itself.
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
// Index of the first derivative entry of the next neighbour in rNodalSFDneigh.
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId because the latter also stores the considered node's ID at the beginning
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
// Consistency check: the i-th neighbour must match the (i+1)-th stored id.
unsigned int neigh_nodes_id = neighb_nodes[i].Id();
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
if (neigh_nodes_id != other_neigh_nodes_id)
{
std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
}
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
}
}
}
else
{
// --- 3D case: contribution of the node itself. ---
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
// 3 derivative entries per node in 3D.
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
}
}
}
// Persist the assembled gradients on the node.
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD) = Fgrad;
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
KRATOS_CATCH("");
}
// Interface-node variant of the nodal deformation-gradient assembly: fills the
// caller-provided Fgrad / FgradVel matrices (it does NOT write them back to the
// nodal database here -- the caller decides which variable to store them in,
// fluid or solid). Unlike the solid-node variant, the neighbour list may be
// ordered differently from the stored id order, so for every stored id the
// matching neighbour is searched by id (the extra inner loop makes this the
// more expensive routine). theta blends current/previous velocities:
// v = theta*v^{n+1} + (1-theta)*v^{n}.
void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, Matrix &Fgrad, Matrix &FgradVel)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
// Start from zeroed output matrices (caller may pass previously-used ones).
noalias(Fgrad) = ZeroMatrix(dimension, dimension);
noalias(FgradVel) = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighNodesSize = neighb_nodes.size();
if (dimension == 2)
{
// --- 2D case: contribution of the node itself. ---
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
// Index of the first derivative entry of the next matched neighbour.
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId becuase this has also the considered node ID at the beginning
{
// Find the neighbour whose Id matches the stored id order; only a
// match consumes a derivative slot (firstRow advances inside the if).
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
break;
}
}
}
}
}
else
{
// --- 3D case: contribution of the node itself. ---
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
// 3 derivative entries per node in 3D.
unsigned int firstRow = 3;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++)
{
// Match neighbour by id, as in the 2D branch.
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
for (unsigned int k = 0; k < neighNodesSize; k++)
{
unsigned int neigh_nodes_id = neighb_nodes[k].Id();
if (neigh_nodes_id == other_neigh_nodes_id)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[k].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[k].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[k].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[k].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[k].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
break;
}
}
}
}
}
// Results are returned through the Fgrad/FgradVel reference parameters.
KRATOS_CATCH("");
}
// Applies the newly computed displacements to the mesh and refreshes the
// weighted boundary normals of the model part.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
    KRATOS_TRY;
    // Update nodal displacements and reset the nodal variables consumed by
    // the next remeshing step, then move the mesh coordinates accordingly.
    CalculateDisplacementsAndResetNodalVariables();
    BaseType::MoveMesh();
    // Recompute boundary normals on the moved configuration.
    BoundaryNormalsCalculationUtilities boundary_normals_utility;
    boundary_normals_utility.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
    KRATOS_CATCH("");
}
void CalculateDisplacementsAndResetNodalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
if (dimension == 3)
{
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
///// reset Nodal variables //////
Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
// unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
// unsigned int sizeSDFNeigh=neighbourNodes*dimension;
i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
// if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
Vector &rSolidNodalSFDneighbours = i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
unsigned int solidSizeSDFNeigh = rSolidNodalSFDneighbours.size();
// unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rSolidNodalSFDneighbours) = ZeroVector(solidSizeSDFNeigh);
Vector &rSolidSpatialDefRate = i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rSolidFgrad = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rSolidFgradVel = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
// }
}
// }
}
/// Returns the strategy's identification string.
std::string Info() const override
{
    return std::string("NodalTwoStepVPStrategyForFSI");
}
/// Prints the strategy's identification string to the given stream.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "NodalTwoStepVPStrategyForFSI";
}
// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override
// {
// }
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator. Private on purpose: the strategy is non-copyable.
/// BUG FIX: the original body was empty, so this non-void function had no
/// return statement (undefined behavior if it were ever instantiated and
/// called); returning *this keeps the definition well-formed.
NodalTwoStepVPStrategyForFSI &operator=(NodalTwoStepVPStrategyForFSI const &rOther)
{
    return *this;
}
/// Copy constructor. Private on purpose: the strategy is non-copyable.
NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const &rOther) {}
///@}
}; /// Class NodalTwoStepVPStrategyForFSI
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
|
metrics.h | /*
* Created on: Feb 06, 2017
* Author: Steffen Rechner <steffen.rechner@informatik.uni-halle.de>
*
* This file is part of the marathon software.
*
* Copyright (c) 2016, Steffen Rechner
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef BINARY_MATRIX_METRICS_H_
#define BINARY_MATRIX_METRICS_H_
#ifdef USE_ARMADILLO
#define ARMA_DONT_PRINT_ERRORS
#include <armadillo>
#endif
#include <algorithm>
#include <cstring>
#include <vector>

#include "binary_matrix.h"
#include "marathon/rational.h"
#include "marathon/integer.h"
namespace marathon {
namespace binary_matrix {
/**
* Return the number of 2x2 submatrices of the form
* (0 1) or (1 0)
* (1 0) (0 1)
* normalized by the total number of 2x2 submatrices.
* @param bin Binary Matrix.
* @return The rate of checkerboard units.
*/
// Rate of 2x2 checkerboard submatrices, normalized by the number of ordered
// (row pair, column pair) combinations. Each unordered pair is visited once,
// hence the factor 4 in the numerator.
// Fixes vs. original: (a) the manual thread-local counter + critical section
// is replaced by an OpenMP reduction; (b) the signed `int` loop counters that
// were compared against size_t bounds are widened; (c) matrices with fewer
// than two rows or columns no longer divide by zero.
template<class T=double>
T checkerBoardRate(const BinaryMatrix &bin) {
    const size_t nrow = bin.getNumRows();
    const size_t ncol = bin.getNumCols();
    // No 2x2 submatrix exists: define the rate as zero instead of dividing by 0.
    if (nrow < 2 || ncol < 2)
        return T(0);
    size_t num = 0;
    // OpenMP (pre-3.0 compatible) requires a signed loop variable; `long`
    // avoids overflow for matrices with more than INT_MAX rows.
#pragma omp parallel for reduction(+:num)
    for (long i1 = 0; i1 < (long) nrow; i1++) {
        for (size_t i2 = (size_t) i1 + 1; i2 < nrow; i2++) {
            for (size_t j1 = 0; j1 < ncol; j1++) {
                for (size_t j2 = j1 + 1; j2 < ncol; j2++) {
                    if (bin.isCheckerBoardUnit(i1, j1, i2, j2)) {
                        num++;
                    }
                }
            }
        }
    }
    return T(num * 4) / T(nrow * (nrow - 1) * ncol * (ncol - 1));
}
/**
* Calculate the nestedness defined by Robert and Stone, 1990.
*
* A. Roberts and L. Stone.
* Island-sharing by archipelago species. Oecologia 83 (1990), 560–567. doi: 10.1007/bf00317210.
*
* @param bin Binary Matrix.
* @return S2 Nestedness score.
*/
// S2 nestedness (Roberts & Stone 1990): mean of the squared number of shared
// columns over all ordered pairs of distinct rows.
template<class T=double>
T nestednessS2(const BinaryMatrix &bin) {
    const size_t nrow = bin.getNumRows();
    const size_t ncol = bin.getNumCols();
    size_t sum_of_squares = 0;
    // Visit each unordered row pair once and accumulate (shared columns)^2.
    for (int a = 0; a < nrow; a++) {
        for (int b = a + 1; b < nrow; b++) {
            int shared = 0;
            for (int k = 0; k < ncol; k++) {
                shared += bin.get(a, k) * bin.get(b, k);
            }
            sum_of_squares += shared * shared;
        }
    }
    // The factor 2 converts unordered pairs into the ordered-pair count nrow*(nrow-1).
    return T(sum_of_squares * 2) / T(nrow * (nrow - 1));
}
/**
* Calculate the nestedness defined by Patterson and Attmar.
*
* B. D. Patterson and W. Atmar.
* Nested subsets and the structure of insular mammalian faunas and archipelagos.
* Biological Journal of the Linnean Society 28 (1986), 65–82.
* doi: 10.1111/j.1095-8312.1986.tb01749.x.
*
* @param bin Binary Matrix.
* @return Nestedness score.
*/
// Patterson & Atmar nested-subset score: counts "unexpected absences", i.e.
// empty cells (i,j) whose column j is fuller than the sparsest column that
// row i actually occupies.
// Fix vs. original: the raw new[]/delete[] buffers (which leaked if an
// intermediate call threw, and relied on memset without including <cstring>)
// are replaced by std::vector, which is exception-safe and zero-initializes.
template<class T=double>
T nestedSubset(const BinaryMatrix &bin) {
    const size_t nrow = bin.getNumRows();
    const size_t ncol = bin.getNumCols();
    // Column sums.
    std::vector<int> colsum(ncol, 0);
    for (size_t i = 0; i < nrow; i++) {
        for (size_t j = 0; j < ncol; j++) {
            colsum[j] += bin.get(i, j);
        }
    }
    // For each row: minimal column sum over its occupied columns.
    // nrow is a valid upper bound because no column sum can exceed it;
    // an empty row keeps m[i] == nrow and thus contributes nothing below.
    std::vector<int> m(nrow, (int) nrow);
    for (size_t i = 0; i < nrow; i++) {
        for (size_t j = 0; j < ncol; j++) {
            if (bin.get(i, j)) {
                m[i] = std::min(m[i], colsum[j]);
            }
        }
    }
    // Count unexpected absences.
    size_t S = 0;
    for (size_t i = 0; i < nrow; i++) {
        for (size_t j = 0; j < ncol; j++) {
            if (!bin.get(i, j) && colsum[j] > m[i]) {
                S++;
            }
        }
    }
    return T(S);
}
/**
* Calculate the number of entries that differ from each other.
* @param bin1 A binary matrix.
* @param bin2 A binary matrix.
* @return Hamming distance between matrices.
*/
// Number of entries in which the two matrices differ: XOR the underlying
// bitsets and count the set bits.
// Fix: `inline` is required because this is a non-template function defined
// in a header -- without it, including the header from more than one
// translation unit violates the one-definition rule (duplicate symbol at
// link time).
inline size_t hammingDistance(const BinaryMatrix &bin1, const BinaryMatrix &bin2) {
    boost::dynamic_bitset<> x = bin1.getBitset() ^ bin2.getBitset();
    return x.count();
}
/**
* Calculate the nestedness measure based on overlap and decreasing fill.
*
* M. Almeida-Neto, P. Guimarães, P. R. Guimarães, R. D. Loyola, and W. Ulrich.
* A consistent metric for nestedness analysis in ecological systems: reconciling
* concept and measurement.
* Oikos 117 (2008), 1227–1239. doi: 10.1111/j.0030-1299.2008.16644.x.
*
* @param bin A binary matrix.
* @return NODF
*/
/*
 * NODF (Almeida-Neto et al. 2008):
 *   100 * 2 * (score_row + score_col) / (nrow*(nrow-1) + ncol*(ncol-1)).
 * A column pair (j1, j2) with j1 left of j2 scores the fraction of ones of the
 * sparser column j2 that overlap with the denser column j1; it scores zero
 * unless the fill strictly decreases (colsum[j1] > colsum[j2]). Row pairs
 * (top row against every row below) are treated analogously.
 *
 * Fixes vs. original: raw new[]/delete[] buffers (leak on exception, memset
 * without <cstring>) replaced by std::vector; a completely empty second
 * column/row no longer causes a 0/0 division (NaN) -- it contributes a zero
 * score, which matches the metric's definition.
 */
template<class T=double>
T NODF(const BinaryMatrix &bin) {
    const int nrow = bin.getNumRows();
    const int ncol = bin.getNumCols();
    // Row and column degrees.
    std::vector<int> rowsum(nrow, 0);
    std::vector<int> colsum(ncol, 0);
    for (int i = 0; i < nrow; i++) {
        for (int j = 0; j < ncol; j++) {
            if (bin.get(i, j)) {
                rowsum[i]++;
                colsum[j]++;
            }
        }
    }
    T score_col = 0;
    T score_row = 0;
    // Pairwise comparison of each column against all columns to its right.
    for (int j1 = 0; j1 < ncol; j1++) {
        for (int j2 = j1 + 1; j2 < ncol; j2++) {
            if (colsum[j1] > colsum[j2] && colsum[j2] > 0) {
                // Percentage of ones in column j2 that also appear in j1.
                int overlap = 0;
                for (int i = 0; i < nrow; i++) {
                    if (bin.get(i, j2) && bin.get(i, j1)) {
                        overlap++;
                    }
                }
                score_col += T(overlap) / T(colsum[j2]);
            }
        }
    }
    // Pairwise comparison of each row against all rows below it.
    for (int i1 = 0; i1 < nrow; i1++) {
        for (int i2 = i1 + 1; i2 < nrow; i2++) {
            if (rowsum[i1] > rowsum[i2] && rowsum[i2] > 0) {
                // Percentage of ones in row i2 that also appear in i1.
                int overlap = 0;
                for (int j = 0; j < ncol; j++) {
                    if (bin.get(i1, j) && bin.get(i2, j)) {
                        overlap++;
                    }
                }
                score_row += T(overlap) / T(rowsum[i2]);
            }
        }
    }
    // Normalize by the total number of ordered pairs and scale to [0, 100].
    return T(100 * 2) * (score_row + score_col) / T(nrow * (nrow - 1) + ncol * (ncol - 1));
}
#ifdef USE_ARMADILLO
/**
* Calculate the spectral radius of the symmetric adjacency matrix
* defined by expanding the bi-adjacency matrix bin.
*
* P. P. Staniczenko, J. C. Kopp, and S. Allesina. The ghost of nestedness in
* ecological networks. Nature Communications 4 (Jan. 2013), 1391.
* doi: 10.1038/ncomms2422.
*
* @tparam T Can be float or double.
* @param bin A binary matrix.
* @return The absolute value of the maximum real eigenvalue
* from the adjacency matrix defined by bin.
*/
// Spectral radius of the bipartite adjacency matrix built from the
// bi-adjacency matrix `bin` (Staniczenko et al. 2013): rows map to vertices
// 0..nrow-1, columns to vertices nrow..nrow+ncol-1, each one-entry becomes a
// symmetric pair of edges.
template<class T=double>
T spectralRadius(const BinaryMatrix &bin) {
const int nrow = bin.getNumRows();
const int ncol = bin.getNumCols();
const int total = bin.getTotal();
// transform binary matrix into armadillo sparse matrix format
// (COO triplets: each one-entry contributes two symmetric edges).
arma::umat locations(2, 2*total);
arma::Col<T> values(2*total);
int k = 0;
for(int i=0; i<nrow; i++) {
for(int j=0; j<ncol; j++) {
if(bin.get(i,j)) {
locations(0, k) = i;
locations(1, k) = nrow + j;
values(k) = T(1);
k++;
locations(0, k) = nrow + j;
locations(1, k) = i;
values(k) = T(1);
k++;
}
}
}
// define sparse (nrow+ncol) x (nrow+ncol) symmetric adjacency matrix
arma::SpMat<T> A(locations, values, nrow+ncol, nrow+ncol);
// find largest eigenvalues
arma::Col<T> eigval;
k=1; // number of eigenvalues
const int K_MAX = 10; // maximal number of eigenvalues
// while not successful calculate the largest k eigenvalues
// (eigs_sym can fail to converge for very small k; retry with more,
// giving up after K_MAX attempts)
while(!arma::eigs_sym(eigval, A, k) || eigval.size() != k) {
k++;
if(k > K_MAX) {
throw std::runtime_error("Error while calculating eigenvalues!");
}
}
// eigs_sym returns eigenvalues in ascending order, so the last entry is
// the largest; its absolute value is the spectral radius.
return fabs(eigval[k-1]);
}
#endif
}
}
#endif /* BINARY_MATRIX_METRICS_H_ */
|
imageHelper.h | #pragma once
// Static helper routines for depth/color image processing: Gaussian and
// bilateral filtering, erosion of unreliable depth pixels, grayscale
// conversion, Sobel gradient magnitude and simple image statistics.
//
// Fixes vs. original:
//  * linearR: the clamp bounds were swapped (max(1, min(0, v)) always
//    returned 1.0f); it now correctly clamps the linear falloff to [0, 1].
//  * filter/erode bounds checks: x/y are unsigned, so expressions such as
//    `x - kernelRadius` and `x + j >= 0` were evaluated in (wrapping)
//    unsigned arithmetic; all neighbourhood indices are now computed in int.
//  * computeImageStatistics: guards the mean against division by zero when
//    the image has no valid pixels.
class ImageHelper
{
public:
	ImageHelper() {}
	~ImageHelper() {}

	// Gaussian range weight: exp(-d^2 / (2*sigma^2)).
	static inline float gaussR(float sigma, float dist)
	{
		return exp(-(dist*dist) / (2.0f*sigma*sigma));
	}
	// Gaussian range weight for a float 3-vector difference (Euclidean length).
	static inline float gaussR(float sigma, const vec3f& d)
	{
		float dist = d.length();
		return exp(-(dist*dist) / (2.0f*sigma*sigma));
	}
	// Gaussian range weight for an 8-bit color difference.
	// NOTE(review): the commented-out /255 normalization suggests sigma is
	// expected in 0..255 units here -- confirm against callers.
	static inline float gaussR(float sigma, const vec3uc& d)
	{
		vec3f _d(d); //_d /= 255.0f;
		float dist = _d.length();
		return exp(-(dist*dist) / (2.0f*sigma*sigma));
	}
	// Linear falloff weight clamped to [0, 1].
	// BUG FIX: the original returned std::max(1.0f, std::min(0.0f, v)),
	// which always evaluates to 1.0f regardless of the input.
	static inline float linearR(float sigma, float dist)
	{
		return std::max(0.0f, std::min(1.0f, 1.0f - (dist*dist) / (2.0f*sigma*sigma)));
	}
	// 2-D spatial Gaussian weight.
	static inline float gaussD(float sigma, int x, int y)
	{
		return exp(-((x*x + y*y) / (2.0f*sigma*sigma)));
	}
	// 1-D spatial Gaussian weight.
	static inline float gaussD(float sigma, int x)
	{
		return exp(-((x*x) / (2.0f*sigma*sigma)));
	}

	// In-place Gaussian smoothing. Invalid pixels are skipped as kernel
	// support; pixels that are invalid (or have no valid support) remain
	// invalid in the result.
	static void gaussFilter(BaseImage<float>& img, float sigmaD) {
		BaseImage<float> res(img.getDimensions());
		res.setInvalidValue(img.getInvalidValue());
		const int kernelRadius = (int)ceil(2.0*sigmaD);
#pragma omp parallel for
		for (int _y = 0; _y < (int)img.getHeight(); _y++) {
			unsigned int y = (unsigned int)_y;
			for (unsigned int x = 0; x < img.getWidth(); x++) {
				res.setInvalid(x, y);
				float sum = 0.0f;
				float sumWeight = 0.0f;
				if (img.isValid(x, y)) {
					// Cast to int before subtracting: the original computed
					// x - kernelRadius in unsigned arithmetic, which wraps
					// near the left/top border.
					for (int m = (int)x - kernelRadius; m <= (int)x + kernelRadius; m++) {
						for (int n = (int)y - kernelRadius; n <= (int)y + kernelRadius; n++) {
							if (m >= 0 && n >= 0 && m < (int)img.getWidth() && n < (int)img.getHeight()) {
								if (img.isValid(m, n)) {
									const float current = img(m, n);
									const float weight = gaussD(sigmaD, m - (int)x, n - (int)y);
									sumWeight += weight;
									sum += weight*current;
								}
							}
						}
					}
					if (sumWeight > 0.0f) res(x, y) = sum / sumWeight;
				}
			}
		}
		img = res;
	}

	// In-place bilateral filtering of a depth image: spatial Gaussian
	// (sigmaD) times range Gaussian on the depth difference (sigmaR).
	static void bilateralFilter(DepthImage32& d, float sigmaD, float sigmaR) {
		DepthImage32 res(d.getWidth(), d.getHeight());
		res.setInvalidValue(d.getInvalidValue());
		const int kernelRadius = (int)ceil(2.0*sigmaD);
#pragma omp parallel for
		for (int _y = 0; _y < (int)d.getHeight(); _y++) {
			unsigned int y = (unsigned int)_y;
			for (unsigned int x = 0; x < d.getWidth(); x++) {
				res.setInvalid(x, y);
				float sum = 0.0f;
				float sumWeight = 0.0f;
				if (d.isValid(x, y)) {
					const float center = d(x, y);
					// int arithmetic for the window bounds (see gaussFilter).
					for (int m = (int)x - kernelRadius; m <= (int)x + kernelRadius; m++) {
						for (int n = (int)y - kernelRadius; n <= (int)y + kernelRadius; n++) {
							if (m >= 0 && n >= 0 && m < (int)d.getWidth() && n < (int)d.getHeight()) {
								if (d.isValid(m, n)) {
									const float current = d(m, n);
									const float weight = gaussD(sigmaD, m - (int)x, n - (int)y)*gaussR(sigmaR, current - center);
									sumWeight += weight;
									sum += weight*current;
								}
							}
						}
					}
					if (sumWeight > 0.0f) res(x, y) = sum / sumWeight;
				}
			}
		}
		d = res;
	}

	// Erodes unreliable depth pixels by ping-ponging between the input image
	// and a temporary; numIter is rounded up to an even count so the final
	// result ends up back in `depth`.
	static void erode(DepthImage32& depth, unsigned int numIter = 2) {
		numIter = 2 * ((numIter + 1) / 2);
		DepthImage32 tmp; tmp.setInvalidValue(depth.getInvalidValue());
		for (unsigned int i = 0; i < numIter; i++) {
			if (i % 2 == 0) {
				erode(tmp, depth, 3, 0.05f, 0.3f);
			}
			else {
				erode(depth, tmp, 3, 0.05f, 0.3f);
			}
		}
	}

	// Standard luma (Rec. 601) grayscale conversion, scaled to [0, 1].
	static BaseImage<float> convertToGrayscale(const ColorImageR8G8B8& image) {
		BaseImage<float> res(image.getWidth(), image.getHeight());
		res.setInvalidValue(0.0f);
		for (const auto& p : image) {
			float v = (0.299f*p.value.x + 0.587f*p.value.y + 0.114f*p.value.z) / 255.0f;
			res(p.x, p.y) = v;
		}
		return res;
	}

	// Sobel gradient magnitude; border pixels and pixels with any invalid
	// neighbor stay at the (invalid) -inf value.
	static BaseImage<float> computeGradientMagnitude(const ColorImageR32& image)
	{
		BaseImage<float> res(image.getWidth(), image.getHeight());
		res.setInvalidValue(-std::numeric_limits<float>::infinity());
		res.setPixels(res.getInvalidValue());
		const auto invalid = image.getInvalidValue();
#pragma omp parallel for
		for (int _y = 0; _y < (int)image.getHeight(); _y++) {
			unsigned int y = (unsigned int)_y;
			for (unsigned int x = 0; x < image.getWidth(); x++) {
				if (x > 0 && x < image.getWidth() - 1 && y > 0 && y < image.getHeight() - 1) {
					// 3x3 neighborhood; bail out if any sample is invalid.
					float pos00 = image(x - 1, y - 1); if (pos00 == invalid) continue;
					float pos01 = image(x - 1, y - 0); if (pos01 == invalid) continue;
					float pos02 = image(x - 1, y + 1); if (pos02 == invalid) continue;
					float pos10 = image(x - 0, y - 1); if (pos10 == invalid) continue;
					float pos12 = image(x - 0, y + 1); if (pos12 == invalid) continue;
					float pos20 = image(x + 1, y - 1); if (pos20 == invalid) continue;
					float pos21 = image(x + 1, y - 0); if (pos21 == invalid) continue;
					float pos22 = image(x + 1, y + 1); if (pos22 == invalid) continue;
					// Horizontal and vertical Sobel responses, normalized by 8.
					float resU = (-1.0f)*pos00 + (1.0f)*pos20 +
						(-2.0f)*pos01 + (2.0f)*pos21 +
						(-1.0f)*pos02 + (1.0f)*pos22;
					resU /= 8.0f;
					float resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
						(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
					resV /= 8.0f;
					res(x, y) = vec2f(resU, resV).length();
				}
			}
		}
		return res;
	}

	// Prints min/max/mean over the valid pixels of the image to stdout.
	static void computeImageStatistics(const BaseImage<float>& image)
	{
		float min = std::numeric_limits<float>::infinity(), max = -std::numeric_limits<float>::infinity();
		float mean = 0.0f; unsigned int count = 0;
		for (const auto& p : image) {
			if (p.value != image.getInvalidValue()) {
				if (p.value < min) min = p.value;
				if (p.value > max) max = p.value;
				mean += p.value;
				count++;
			}
		}
		// Guard against an image with no valid pixels (the original divided
		// by zero and printed NaN).
		if (count > 0) mean /= count;
		std::cout << "image range [" << min << ", " << max << "]" << std::endl;
		std::cout << "mean value = " << mean << " (" << count << "/" << image.getNumPixels() << " valid pixels)" << std::endl;
	}

private:
	// Marks a pixel invalid when at least fracReq of its (2s+1)^2 window is
	// invalid, zero, or differs from the center by more than dThresh.
	static void erode(DepthImage32& output, const DepthImage32& input, int structureSize, float dThresh, float fracReq)
	{
		output.allocate(input.getWidth(), input.getHeight());
		for (unsigned int y = 0; y < input.getHeight(); y++) {
			for (unsigned int x = 0; x < input.getWidth(); x++) {
				unsigned int count = 0;
				float oldDepth = input(x, y);
				for (int i = -structureSize; i <= structureSize; i++) {
					for (int j = -structureSize; j <= structureSize; j++) {
						// int arithmetic for the bounds check: the original
						// `x + j >= 0` was evaluated in unsigned arithmetic
						// and therefore always true.
						const int xx = (int)x + j;
						const int yy = (int)y + i;
						if (xx >= 0 && xx < (int)input.getWidth() && yy >= 0 && yy < (int)input.getHeight()) {
							float depth = input(xx, yy);
							if (depth == input.getInvalidValue() || depth == 0.0f || fabs(depth - oldDepth) > dThresh) {
								count++;
							}
						}
					}
				}
				unsigned int sum = (2 * structureSize + 1)*(2 * structureSize + 1);
				if ((float)count / (float)sum >= fracReq) {
					output(x, y) = input.getInvalidValue();
				}
				else {
					output(x, y) = input(x, y);
				}
			}
		}
	}
};
|
DoAllWrap.h | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
#ifndef GALOIS_DOALL_WRAPPER_H
#define GALOIS_DOALL_WRAPPER_H
#include "galois/Galois.h"
#include "galois/GaloisForwardDecl.h"
#include "galois/OrderedTraits.h"
#include "galois/runtime/Executor_DoAll_Old.h"
#include "galois/runtime/Executor_DoAll.h"
#include "galois/substrate/EnvCheck.h"
#ifdef GALOIS_USE_TBB
#include "tbb/parallel_for_each.h"
#endif
#include "CilkInit.h"
#include <unistd.h>
#include "llvm/Support/CommandLine.h"
namespace galois {
enum DoAllTypes {
DO_ALL_OLD,
DO_ALL_OLD_STEAL,
DOALL_GALOIS_FOREACH,
DO_ALL,
DOALL_CILK,
DOALL_OPENMP
};
namespace cll = llvm::cl;
// extern cll::opt<DoAllTypes> doAllKind;
static cll::opt<DoAllTypes> doAllKind(
"doAllKind", cll::desc("DoAll Implementation"),
cll::values(clEnumVal(DO_ALL_OLD, "DO_ALL_OLD"),
clEnumVal(DO_ALL_OLD_STEAL, "DO_ALL_OLD_STEAL"),
clEnumVal(DOALL_GALOIS_FOREACH, "DOALL_GALOIS_FOREACH"),
clEnumVal(DO_ALL, "DO_ALL"),
clEnumVal(DOALL_CILK, "DOALL_CILK"),
clEnumVal(DOALL_OPENMP, "DOALL_OPENMP"), clEnumValEnd),
cll::init(DO_ALL_OLD)); // default is regular DOALL
void setDoAllImpl(const DoAllTypes& type);
DoAllTypes getDoAllImpl(void);
// Primary template: selected for any DoAllTypes value that has no explicit
// specialization below. Reaching this means the requested loop executor is
// not implemented, so it aborts at runtime.
template <DoAllTypes TYPE>
struct DoAllImpl {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
std::abort();
}
};
// Executor for DO_ALL_OLD: forwards to the legacy do_all implementation.
// NOTE(review): this passes steal() exactly like the DO_ALL_OLD_STEAL
// specialization below, making the two currently identical — confirm whether
// DO_ALL_OLD was meant to run without work stealing.
template <>
struct DoAllImpl<DO_ALL_OLD> {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
galois::runtime::do_all_gen_old(
range, func, std::tuple_cat(std::make_tuple(steal()), argsTuple));
}
};
// Executor for DO_ALL_OLD_STEAL: legacy do_all with work stealing enabled.
template <>
struct DoAllImpl<DO_ALL_OLD_STEAL> {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
galois::runtime::do_all_gen_old(
range, func, std::tuple_cat(std::make_tuple(steal()), argsTuple));
}
};
// Executor for DOALL_GALOIS_FOREACH: emulates a do_all on top of the Galois
// for_each engine, with pushes and conflict detection disabled.
template <>
struct DoAllImpl<DOALL_GALOIS_FOREACH> {
// Adapts a unary do_all functor to for_each's (item, context) signature;
// the user context is ignored.
template <typename T, typename _F>
struct FuncWrap {
_F func;
template <typename C>
void operator()(T& x, C&) {
func(x);
}
};
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
using T = typename R::value_type;
// Fixed chunk size for the per-thread worklist (hard-coded here rather
// than read from argsTuple; see the commented-out alternative below).
const unsigned CHUNK_SIZE = 128;
// const unsigned CHUNK_SIZE = get_type_by_supertype<chunk_size_tag,
// ArgsTuple>::type::value;
using WL_ty = galois::worklists::PerThreadChunkLIFO<CHUNK_SIZE, T>;
galois::runtime::for_each_gen(
range, FuncWrap<T, F>{func},
std::tuple_cat(
std::make_tuple(galois::wl<WL_ty>(), no_pushes(), no_conflicts()),
argsTuple));
}
};
// Executor for DO_ALL: forwards directly to the current Galois do_all engine.
template <>
struct DoAllImpl<DO_ALL> {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
galois::runtime::do_all_gen(range, func, argsTuple);
}
};
#ifdef HAVE_CILK
// Executor for DOALL_CILK: runs the loop with cilk_for after lazily
// initializing the Cilk runtime. argsTuple is ignored by this backend.
template <>
struct DoAllImpl<DOALL_CILK> {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
CilkInit();
cilk_for(auto it = range.begin(), end = range.end(); it != end; ++it) {
func(*it);
}
}
};
#else
// Stub used when Cilk support was not compiled in: dies at runtime if the
// user selects DOALL_CILK anyway.
template <>
struct DoAllImpl<DOALL_CILK> {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
GALOIS_DIE("Cilk not found\n");
}
};
#endif
// Executor for DOALL_OPENMP: plain OpenMP parallel for with guided
// scheduling. Requires the range iterators to be random-access (`it < end`).
// argsTuple is ignored by this backend.
template <>
struct DoAllImpl<DOALL_OPENMP> {
template <typename R, typename F, typename ArgsTuple>
static inline void go(const R& range, const F& func,
const ArgsTuple& argsTuple) {
const auto end = range.end();
#pragma omp parallel for schedule(guided)
for (auto it = range.begin(); it < end; ++it) {
func(*it);
}
}
};
// Dispatch a do_all loop over `range` to the executor selected by `type`.
// The chosen DoAllImpl specialization receives func and argsTuple unchanged.
template <typename R, typename F, typename ArgsTuple>
void do_all_choice(const R& range, const F& func, const DoAllTypes& type,
const ArgsTuple& argsTuple) {
switch (type) {
case DO_ALL_OLD_STEAL:
DoAllImpl<DO_ALL_OLD_STEAL>::go(range, func, argsTuple);
break;
case DOALL_GALOIS_FOREACH:
DoAllImpl<DOALL_GALOIS_FOREACH>::go(range, func, argsTuple);
break;
case DO_ALL_OLD:
DoAllImpl<DO_ALL_OLD>::go(range, func, argsTuple);
break;
case DO_ALL:
DoAllImpl<DO_ALL>::go(range, func, argsTuple);
break;
case DOALL_CILK:
DoAllImpl<DOALL_CILK>::go(range, func, argsTuple);
break;
case DOALL_OPENMP:
// NOTE(review): the OpenMP executor exists above but its dispatch is
// commented out and replaced with an abort — presumably deliberately
// disabled; confirm before re-enabling.
// DoAllImpl<DOALL_OPENMP>::go(range, func, argsTuple);
std::abort();
break;
default:
abort();
break;
}
}
// Convenience overload: uses the globally configured executor kind
// (the `doAllKind` command-line option declared above).
template <typename R, typename F, typename ArgsTuple>
void do_all_choice(const R& range, const F& func, const ArgsTuple& argsTuple) {
do_all_choice(range, func, doAllKind, argsTuple);
}
} // end namespace galois
#endif // GALOIS_DOALL_WRAPPER_H
|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include "common/Schema.h"
#include <random>
#include <memory>
#include <cstring>
#include "segcore/SegmentGrowing.h"
#include "segcore/SegmentSealed.h"
#include "Constants.h"
#include <boost/algorithm/string/predicate.hpp>
#include "segcore/SegmentSealed.h"
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include <query/SearchOnIndex.h>
using boost::algorithm::starts_with;
namespace milvus::segcore {
// Container for synthetic test data produced by DataGen(): per-field
// column-major blobs plus a row-major copy and the row ids / timestamps
// needed to insert it into a segment.
struct GeneratedData {
std::vector<uint8_t> rows_;   // row-major serialization built by generate_rows()
std::vector<aligned_vector<uint8_t>> cols_;   // one raw byte blob per schema field
std::vector<idx_t> row_ids_;
std::vector<Timestamp> timestamps_;
RowBasedRawData raw_;   // points into rows_; valid only while this object lives
// Copy field `index` out as a typed vector. T must match the field's
// element type; the element count is derived from the blob size.
template <typename T>
auto
get_col(int index) const {
auto& target = cols_.at(index);
std::vector<T> ret(target.size() / sizeof(T));
memcpy(ret.data(), target.data(), target.size());
return ret;
}
// Return a mutable typed pointer into field `index`'s blob (no copy).
// The pointer aliases cols_ and is invalidated if cols_ is modified.
template <typename T>
auto
get_mutable_col(int index) {
auto& target = cols_.at(index);
assert(target.size() == row_ids_.size() * sizeof(T));
auto ptr = reinterpret_cast<T*>(target.data());
return ptr;
}
private:
// Only DataGen() may construct instances (enforced via the friend below).
GeneratedData() = default;
friend GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed, uint64_t ts_offset);
void
generate_rows(int64_t N, SchemaPtr schema);
};
// Build the row-major representation (rows_/raw_) from the column-major
// blobs in cols_. Each row is the concatenation of one element from every
// field, at byte offsets given by the prefix sums of the field sizes.
inline void
GeneratedData::generate_rows(int64_t N, SchemaPtr schema) {
// offset_infos[f] = byte offset of field f within a row (prefix sums).
std::vector<int> offset_infos(schema->size() + 1, 0);
auto sizeof_infos = schema->get_sizeof_infos();
std::partial_sum(sizeof_infos.begin(), sizeof_infos.end(), offset_infos.begin() + 1);
int64_t len_per_row = offset_infos.back();
assert(len_per_row == schema->get_total_sizeof());
// change column-based data to row-based data
std::vector<uint8_t> result(len_per_row * N);
for (int index = 0; index < N; ++index) {
for (int fid = 0; fid < schema->size(); ++fid) {
auto len = sizeof_infos[fid];
auto offset = offset_infos[fid];
auto src = cols_[fid].data() + index * len;
auto dst = result.data() + index * len_per_row + offset;
memcpy(dst, src, len);
}
}
rows_ = std::move(result);
// raw_ is a non-owning view into rows_.
raw_.raw_data = rows_.data();
raw_.sizeof_per_row = schema->get_total_sizeof();
raw_.count = N;
}
// Generate N rows of deterministic pseudo-random data matching `schema`.
// Field-name conventions: float-vector fields named "normalized*" are
// L2-normalized; INT64 fields named "counter*" get sequential values.
// Row ids are 0..N-1 and timestamps are i + ts_offset.
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0) {
using std::vector;
std::vector<aligned_vector<uint8_t>> cols;
std::default_random_engine er(seed);
std::normal_distribution<> distr(0, 1);
int offset = 0;
// Serialize any typed vector into a raw aligned byte blob and append it.
auto insert_cols = [&cols](auto& data) {
using T = std::remove_reference_t<decltype(data)>;
auto len = sizeof(typename T::value_type) * data.size();
auto ptr = aligned_vector<uint8_t>(len);
memcpy(ptr.data(), data.data(), len);
cols.emplace_back(std::move(ptr));
};
for (auto& field : schema->get_fields()) {
switch (field.get_data_type()) {
case engine::DataType::VECTOR_FLOAT: {
auto dim = field.get_dim();
vector<float> final(dim * N);
bool is_ip = starts_with(field.get_name().get(), "normalized");
// Per-row RNG seeded with (seed + n) so results are deterministic
// regardless of the parallel schedule; rows write disjoint ranges.
#pragma omp parallel for
for (int n = 0; n < N; ++n) {
vector<float> data(dim);
float sum = 0;
std::default_random_engine er2(seed + n);
std::normal_distribution<> distr2(0, 1);
for (auto& x : data) {
x = distr2(er2) + offset;
sum += x * x;
}
if (is_ip) {
sum = sqrt(sum);
for (auto& x : data) {
x /= sum;
}
}
std::copy(data.begin(), data.end(), final.begin() + dim * n);
}
insert_cols(final);
break;
}
case engine::DataType::VECTOR_BINARY: {
auto dim = field.get_dim();
Assert(dim % 8 == 0);
vector<uint8_t> data(dim / 8 * N);
for (auto& x : data) {
x = er();
}
insert_cols(data);
break;
}
case engine::DataType::INT64: {
vector<int64_t> data(N);
// begin with counter
if (starts_with(field.get_name().get(), "counter")) {
int64_t index = 0;
for (auto& x : data) {
x = index++;
}
} else {
int i = 0;
for (auto& x : data) {
// NOTE(review): the random value is immediately overwritten by the
// sequential one below; the er() draw is kept only so the engine's
// stream (consumed by later fields) stays unchanged.
x = er() % (2 * N);
x = i;
i++;
}
}
insert_cols(data);
break;
}
case engine::DataType::INT32: {
vector<int> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data);
break;
}
case engine::DataType::FLOAT: {
vector<float> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data);
break;
}
case engine::DataType::DOUBLE: {
vector<double> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data);
break;
}
default: {
throw std::runtime_error("unimplemented");
}
}
++offset;
}
GeneratedData res;
res.cols_ = std::move(cols);
// int64_t counter (was `int`): avoids truncation for very large N.
for (int64_t i = 0; i < N; ++i) {
res.row_ids_.push_back(i);
res.timestamps_.push_back(i + ts_offset);
}
// std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er);
res.generate_rows(N, schema);
// FIX: was `return std::move(res);` — the explicit move is pessimizing and
// defeats NRVO; a plain return still moves (or elides) automatically.
return res;
}
// Build a FloatVector placeholder group with `num_queries` random query
// vectors of dimension `dim`, drawn from N(0,1) with a deterministic seed.
// The placeholder tag "$0" matches the first placeholder in query plans.
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
std::normal_distribution<double> dis(0, 1);
std::default_random_engine e(seed);
for (int i = 0; i < num_queries; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(dis(e));
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
// Each query is appended as one raw binary value (dim floats).
value->add_values(vec.data(), vec.size() * sizeof(float));
}
return raw_group;
}
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
int64_t src_index = 0;
for (int i = 0; i < num_queries; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(src[src_index++]);
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size() * sizeof(float));
}
return raw_group;
}
// Build a BinaryVector placeholder group with `num_queries` random bit
// vectors of dimension `dim` (dim must be a multiple of 8; each query is
// dim/8 raw bytes), using a deterministic seed.
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
assert(dim % 8 == 0);
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::BinaryVector);
std::default_random_engine e(seed);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
for (int d = 0; d < dim / 8; ++d) {
vec.push_back(e());
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
assert(dim % 8 == 0);
namespace ser = milvus::proto::milvus;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::BinaryVector);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
for (int d = 0; d < dim / 8; ++d) {
vec.push_back(*ptr);
++ptr;
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}
// Render a SearchResult as JSON: one list per query, each entry formatted
// "<segment offset>-><distance>" for that query's top-k hits.
inline json
SearchResultToJson(const SearchResult& sr) {
int64_t num_queries = sr.num_queries_;
int64_t topk = sr.topk_;
std::vector<std::vector<std::string>> results;
for (int q = 0; q < num_queries; ++q) {
std::vector<std::string> result;
for (int k = 0; k < topk; ++k) {
// Hits are stored flat, query-major: row q occupies [q*topk, (q+1)*topk).
int index = q * topk + k;
result.emplace_back(std::to_string(sr.internal_seg_offsets_[index]) + "->" +
std::to_string(sr.result_distances_[index]));
}
results.emplace_back(std::move(result));
}
return json{results};
}
// FIX: removed the stray ';' that followed the function body.
// Load a GeneratedData set into a sealed segment: first the system columns
// (row ids, timestamps), then every schema field in declaration order.
inline void
SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) {
// TODO
auto row_count = dataset.row_ids_.size();
{
LoadFieldDataInfo info;
info.blob = dataset.row_ids_.data();
info.row_count = dataset.row_ids_.size();
info.field_id = 0; // field id for RowId
seg.LoadFieldData(info);
}
{
LoadFieldDataInfo info;
info.blob = dataset.timestamps_.data();
info.row_count = dataset.timestamps_.size();
info.field_id = 1;  // field id for Timestamp
seg.LoadFieldData(info);
}
// User fields: cols_ is assumed to be ordered exactly like the segment
// schema, so the running field_offset indexes both in lockstep.
int field_offset = 0;
for (auto& meta : seg.get_schema().get_fields()) {
LoadFieldDataInfo info;
info.field_id = meta.get_id().get();
info.row_count = row_count;
info.blob = dataset.cols_[field_offset].data();
seg.LoadFieldData(info);
++field_offset;
}
}
// Convenience factory: create a sealed segment for `schema`, populate it
// with `dataset` via SealedLoader, then attach the prebuilt `index_info`.
inline std::unique_ptr<SegmentSealed>
SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) {
auto sealed = CreateSealedSegment(schema);
SealedLoader(dataset, *sealed);
sealed->LoadIndex(index_info);
return sealed;
}
// Train and populate an IVF (L2) index over N vectors of dimension `dim`
// stored contiguously at `vec`. Vectors are added without explicit ids.
inline knowhere::VecIndexPtr
GenIndexing(int64_t N, int64_t dim, const float* vec) {
// {knowhere::IndexParams::nprobe, 10},
auto conf = knowhere::Config{{knowhere::meta::DIM, dim},
{knowhere::IndexParams::nlist, 1024},
{knowhere::Metric::TYPE, milvus::knowhere::Metric::L2},
{knowhere::meta::DEVICEID, 0}};
auto database = knowhere::GenDataset(N, dim, vec);
auto indexing = std::make_shared<knowhere::IVF>();
indexing->Train(database, conf);
indexing->AddWithoutIds(database, conf);
return indexing;
}
} // namespace milvus::segcore
|
Cover.h | /*
* Cover.h
*
* Created on: 03.10.2013
* Author: cls
*/
#ifndef COVER_H_
#define COVER_H_
#include <cinttypes>
#include <set>
#include <vector>
#include <map>
#include <cassert>
#include <limits>
#include "Partition.h"
#include "../Globals.h"
namespace NetworKit {
/**
* @ingroup structures
* Implements a cover of a set, i.e. an assignment of
* its elements to possibly overlapping subsets.
*/
class Cover {
public:
/** Default constructor */
Cover();
/**
* Create a new cover data structure for elements up to a maximum element index.
*
* @param[in] z maximum index
*/
Cover(index z);
/**
* Creates a new cover data structure which contains the given partition.
*
* @param[in] p The partition to construct the cover from
*/
Cover(const Partition &p);
/** Default destructor */
virtual ~Cover() = default;
/**
* Index operator.
*
* @param[in] e an element
*/
inline std::set<index>& operator [](const index& e) {
return this->data[e];
}
/**
* Index operator for const instances of this class.
*
* @param[in] e an element
*/
inline const std::set<index>& operator [](const index& e) const {
return this->data[e];
}
/**
* Return the ids of subsets in which the element @a e is contained.
*
* @param[in] e an element
* @return A set of subset ids in which @a e is contained.
*/
inline std::set<index> subsetsOf(index e) const {
// TODO: assert (e < this->numberOfElements());
return this->data[e];
}
/**
* Check if cover assigns a valid subset to the element @a e.
*
* @param e an element.
* @return @c true, if @a e is assigned to a valid subset, @c false otherwise.
*/
bool contains(index e) const;
/**
* Check if two elements @a e1 and @a e2 belong to the same subset.
*
* @param e1 an element.
* @param e2 an element.
* @return @c true, if @a e1 and @a e2 belong to the same subset, @c false otherwise.
*/
bool inSameSubset(index e1, index e2) const;
/**
* Get the members of a specific subset @a s.
*
* @return The set of members of subset @a s.
*/
std::set<index> getMembers(const index s) const;
/**
* Add the (previously unassigned) element @a e to the set @a s.
* @param[in] s a subset
* @param[in] e an element
*/
void addToSubset(index s, index e);
/**
* Remove the element @a e from the set @a s.
* @param[in] s a subset
* @param[in] e an element
*/
void removeFromSubset(index s, index e);
/**
* Move the element @a e to subset @a s, i.e. remove it from all
* other subsets and place it in the subset.
* @param[in] s a subset
* @param[in] e an element
*/
void moveToSubset(index s, index e);
/**
* Creates a singleton set containing the element @a e and returns the index of the new set.
* @param[in] e an element
* @return The index of the new set.
*/
index toSingleton(index e);
/**
* Assigns every element to a singleton set.
* Set id is equal to element id.
*/
void allToSingletons();
/**
* Assigns the elements from both sets to a new set.
* @param[in] s a subset
* @param[in] t a subset
*/
void mergeSubsets(index s, index t);
/**
* Get an upper bound for the subset ids that have been assigned.
* (This is the maximum id + 1.)
*
* @return An upper bound.
*/
index upperBound() const;
/**
* Get a lower bound for the subset ids that have been assigned.
* @return A lower bound.
*/
index lowerBound() const;
/**
* Get a list of subset sizes. Indices do not necessarily correspond to subset ids.
*
* @return A list of subset sizes.
*/
std::vector<count> subsetSizes() const;
/**
* Get a map from subset id to size of the subset.
*
* @return A map from subset id to size of the subset.
*/
std::map<index, count> subsetSizeMap() const;
/**
* Get the current number of sets in this cover.
*
* @return The number of sets in this cover.
*/
count numberOfSubsets() const;
/**
* Get the current number of elements in this cover.
*
* @return The current number of elements.
*/
count numberOfElements() const;
/**
* Get the ids of nonempty subsets.
*
* @return A set of ids of nonempty subsets.
*/
std::set<index> getSubsetIds() const;
/**
* Sets an upper bound for the subset ids that CAN be assigned.
*
* @param[in] upper highest assigned subset ID + 1
*/
void setUpperBound(index upper);
/**
* Iterate over all entries (node, subset ID of node) and execute callback function @a func (lambda closure).
*
* @param func Takes parameters <code>(node, index)</code>
*/
template<typename Callback> void forEntries(Callback func) const;
/**
* Iterate over all entries (node, subset ID of node) in parallel and execute callback function @a func (lambda closure).
*
* @param func Takes parameters <code>(node, index)</code>
*/
template<typename Callback> void parallelForEntries(Callback handle) const;
private:
index z; //!< maximum element index that can be mapped
index omega; //!< maximum subset index ever assigned
std::vector<std::set<index>> data; //!< data container, indexed by element id, containing set of subset ids
/**
* Allocates and returns a new subset id.
*/
inline index newSubsetId() {
omega++;
index s = omega;
return s;
}
};
// Sequentially invoke handle(e, subset-ids-of-e) for every element id 0..z.
template<typename Callback>
inline void Cover::forEntries(Callback handle) const {
for (index e = 0; e <= this->z; e += 1) {
handle(e, data[e]);
}
}
// Parallel variant of forEntries: invokes handle(e, subset-ids-of-e) for
// every element id 0..z across OpenMP threads. The callback is called
// concurrently, so any state it mutates must be thread-safe.
template<typename Callback>
inline void Cover::parallelForEntries(Callback handle) const {
#pragma omp parallel for
for (index e = 0; e <= this->z; e += 1) {
handle(e, data[e]);
}
}
} /* namespace NetworKit */
#endif /* COVER_H_ */
|
copysplit.c | int main () {
int X = 9;
int diff = 10;
int diff1 = 20;
#pragma omp parallel
{
while (1) {
if (X < 10) {
#pragma omp single
{
diff = diff1;
}
#pragma omp barrier
break;
}
#pragma omp barrier
}
}
X = diff;
}
|
portrend.c | #include <math.h>
#include <SDL.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
/* Define window size */
#define W2 640 // width of screen
#define H 480 // height of screen
#define W 640 // width of _game_ screen (in SplitScreen mode, map is drawn in the remaining space)
/* Define camera height from floor and distance to ceiling */
#define EyeHeight 6
#define DuckHeight 2.5
#define HeadMargin 1
#define KneeHeight 2 /* Maximum walkable obstacle height */
/* Define factors that affect the field of vision (never >= 180 degrees though) */
#define hfov (1.0 * 0.73f*H/W)
#define vfov (1.0 * .2f)
#define TextureMapping
//#define DepthShading
#define LightMapping
#define VisibilityTracking
//#define SplitScreen
// Utility functions. Because C doesn't have templates,
// we use the slightly less safe preprocessor macros to
// implement these functions that work with multiple types.
#define min(a,b) (((a) < (b)) ? (a) : (b)) // min: Choose smaller of two values.
#define max(a,b) (((a) > (b)) ? (a) : (b)) // max: Choose bigger of two values.
#define abs(a) ((a) < 0 ? -(a) : (a))
#define clamp(a, mi,ma) min(max(a,mi),ma) // clamp: Clamp value into set range.
#define sign(v) (((v) > 0) - ((v) < 0)) // sign: Return the sign of a value (-1, 0 or 1)
#define vxs(x0,y0, x1,y1) ((x0)*(y1) - (x1)*(y0)) // vxs: Vector cross product
// Overlap: Determine whether the two number ranges overlap.
#define Overlap(a0,a1,b0,b1) (min(a0,a1) <= max(b0,b1) && min(b0,b1) <= max(a0,a1))
// IntersectBox: Determine whether two 2D-boxes intersect.
#define IntersectBox(x0,y0, x1,y1, x2,y2, x3,y3) (Overlap(x0,x1,x2,x3) && Overlap(y0,y1,y2,y3))
// PointSide: Determine which side of a line the point is on. Return value: -1, 0 or 1.
#define PointSide(px,py, x0,y0, x1,y1) sign(vxs((x1)-(x0), (y1)-(y0), (px)-(x0), (py)-(y0)))
// Intersect: Calculate the point of intersection between two lines.
#define Intersect(x1,y1, x2,y2, x3,y3, x4,y4) ((struct xy) { \
vxs(vxs(x1,y1, x2,y2), (x1)-(x2), vxs(x3,y3, x4,y4), (x3)-(x4)) / vxs((x1)-(x2), (y1)-(y2), (x3)-(x4), (y3)-(y4)), \
vxs(vxs(x1,y1, x2,y2), (y1)-(y2), vxs(x3,y3, x4,y4), (y3)-(y4)) / vxs((x1)-(x2), (y1)-(y2), (x3)-(x4), (y3)-(y4)) })
// Some hard-coded limits.
#define MaxVertices 100 // maximum number of vertices in a map
#define MaxEdges 100 // maximum number of edges in a sector
#define MaxQueue 32 // maximum number of pending portal renders
#ifdef TextureMapping
typedef int Texture[1024][1024];
struct TextureSet { Texture texture, normalmap, lightmap, lightmap_diffuseonly; };
#endif
/* Sectors: Floor and ceiling height; list of wall vertexes and neighbors */
static struct sector
{
float floor, ceil;
struct xy { float x, y; } *vertex; /* Each vertex has an x and y coordinate */
unsigned short npoints; /* How many vertexes there are */
signed char *neighbors; /* Each pair of vertexes may have a corresponding neighboring sector */
#ifdef VisibilityTracking
int visible;
#endif
#ifdef TextureMapping
struct TextureSet *floortexture, *ceiltexture, *uppertextures, *lowertextures;
#endif
} *sectors = NULL;
static unsigned NumSectors = 0;
#ifdef VisibilityTracking
#define MaxVisibleSectors 256
struct xy VisibleFloorBegins[MaxVisibleSectors][W], VisibleFloorEnds[MaxVisibleSectors][W];
char VisibleFloors[MaxVisibleSectors][W];
struct xy VisibleCeilBegins[MaxVisibleSectors][W], VisibleCeilEnds[MaxVisibleSectors][W];
char VisibleCeils[MaxVisibleSectors][W];
unsigned NumVisibleSectors=0;
#endif
/* Player: location */
static struct player
{
struct xyz { float x,y,z; } where, /* Current position */
velocity; /* Current motion vector */
float angle, anglesin, anglecos, yaw; /* Looking towards (and sin() and cos() thereof) */
unsigned char sector; /* Which sector the player is currently in */
} player;
#ifdef LightMapping
static struct light
{
struct xyz where, light;
unsigned char sector;
}* lights = NULL;
static unsigned NumLights = 0;
#endif
static SDL_Surface* surface = NULL;
/* Stream the current SDL framebuffer to an ffmpeg-encoded video (file1.avi)
   via a pipe. NOTE: the leading `return;` deliberately disables capture;
   everything below it is dead code until that line is removed. */
static void SaveFrame1(void)
{
return;
char Buf[512];
sprintf(Buf, "ffmpeg -an -f rawvideo -pix_fmt bgr0 -s %ux%u -r 60 -i - -aspect %u/%u -c:v h264 -crf 2 -preset fast -y file1.avi", W2,H, W2,H);
/* Pipe is opened once on first call and intentionally never closed. */
static FILE* fp = NULL;
if(!fp) { fp = /*fopen("file1.bin", "wb");*/ popen(Buf, "w"); }
fwrite(surface->pixels, W2*H, 4, fp);
fflush(fp);
}
/* Same as SaveFrame1 but writes to file2.avi (used for the second view).
   The leading `return;` deliberately disables capture; the code below is
   dead until it is removed. */
static void SaveFrame2(void)
{
return;
//static unsigned skip=0;
//if(++skip>=3) { skip=0; } else return;
char Buf[512];
sprintf(Buf, "ffmpeg -an -f rawvideo -pix_fmt bgr0 -s %ux%u -r 60 -i - -aspect %u/%u -c:v h264 -crf 2 -preset fast -y file2.avi", W2,H, W2,H);
static FILE* fp = NULL;
if(!fp) { fp = /*fopen("file2.bin", "wb");*/ popen(Buf, "w"); }
fwrite(surface->pixels, W2*H, 4, fp);
fflush(fp);
}
/* LoadData: parse "map.txt" into the global sectors/lights/player state.
   Line formats: 'v' = vertex list, 's' = sector (floor, ceil, vertex
   indices then neighbor ids), 'l' = point light, 'p' = player start.
   Exits the program on any unrecoverable parse error. */
static void LoadData(void)
{
    FILE* fp = fopen("map.txt", "rt");
    if(!fp) { perror("map.txt"); exit(1); }
    char Buf[256], word[256], *ptr;
    struct xy vertex[MaxVertices], *vertexptr = vertex;
    float x,y,angle,number, numbers[MaxEdges];
    int n, m;
    while(fgets(Buf, sizeof Buf, fp))
    switch(sscanf(ptr = Buf, "%32s%n", word, &n) == 1 ? word[0] : '\0')
    {
        case 'v': /* vertex row: one y coordinate followed by x coordinates */
            for(sscanf(ptr += n, "%f%n", &y, &n); sscanf(ptr += n, "%f%n", &x, &n) == 1; )
            {
                if(vertexptr >= vertex+MaxVertices) { fprintf(stderr, "ERROR: Too many vertices, limit is %u\n", MaxVertices); exit(2); }
                *vertexptr++ = (struct xy) { x, y };
            }
            break;
        case 's': /* sector: floor & ceiling heights, vertex ids, neighbor ids */
            sectors = realloc(sectors, ++NumSectors * sizeof(*sectors));
            /* BUGFIX: the next two statements were corrupted by HTML-entity
               mangling ("&sect" had become the section-sign character);
               restored the original address-of expressions. */
            struct sector* sect = &sectors[NumSectors-1];
            sscanf(ptr += n, "%f%f%n", &sect->floor, &sect->ceil, &n);
            for(m=0; sscanf(ptr += n, "%32s%n", word, &n) == 1 && word[0] != '#'; )
            {
                if(m >= MaxEdges)
                { fprintf(stderr, "ERROR: Too many edges in sector %u. Limit is %u\n", NumSectors-1, MaxEdges); exit(2); }
                numbers[m++] = word[0]=='x' ? -1 : strtof(word,0); /* 'x' = no neighbor */
            }
            /* First half of numbers[] are vertex ids, second half neighbors. */
            sect->npoints = m /= 2;
            sect->neighbors = malloc( (m  ) * sizeof(*sect->neighbors) );
            sect->vertex   = malloc( (m+1) * sizeof(*sect->vertex)    );
#ifdef VisibilityTracking
            sect->visible = 0;
#endif
            for(n=0; n<m; ++n) sect->neighbors[n] = numbers[m + n];
            for(n=0; n<m; ++n)
            {
                int v = numbers[n];
                if(v >= vertexptr-vertex)
                { fprintf(stderr, "ERROR: Invalid vertex number %d in sector %u; only have %u\n",
                          v, NumSectors-1, (int)(vertexptr-vertex)); exit(2); }
                sect->vertex[n+1] = vertex[v]; // TODO: bounds checking
            }
            /* Close the polygon: vertex[0] duplicates the last vertex. */
            sect->vertex[0] = sect->vertex[m];
            break;
#ifdef LightMapping
        case 'l': /* light: position (x,z,y), owning sector, RGB intensity */
            lights = realloc(lights, ++NumLights * sizeof(*lights));
            struct light* light = &lights[NumLights-1];
            sscanf(ptr += n, "%f %f %f %f %f %f %f", &light->where.x, &light->where.z, &light->where.y,
                   &number, &light->light.x, &light->light.y, &light->light.z);
            light->sector = (int)number;
            break;
#endif
        case 'p': /* player start: position, view angle, starting sector */
            sscanf(ptr += n, "%f %f %f %f", &x,&y, &angle,&number);
            player = (struct player) { {x,y,0},{0,0,0}, angle,0,0,0, number };
            player.where.z  = sectors[player.sector].floor + EyeHeight;
            player.anglesin = sinf(player.angle);
            player.anglecos = cosf(player.angle);
    }
    fclose(fp);
}
/* Release all per-sector allocations and reset the global sector table so
   LoadData() can be called again. */
static void UnloadData(void)
{
    for(unsigned a=0; a<NumSectors; ++a)
    {
        free(sectors[a].vertex);
        free(sectors[a].neighbors);
    }
    free(sectors);
    sectors = NULL;
    NumSectors = 0;
}
/* Return nonzero if segment (x0,y0)-(x1,y1) intersects segment
   (x2,y2)-(x3,y3): bounding boxes must overlap and neither segment's
   endpoints may lie strictly on the same side of the other's line
   (PointSide sum of ±2 means both endpoints on one side). */
static int IntersectLineSegments(float x0,float y0, float x1,float y1,
float x2,float y2, float x3,float y3)
{
return IntersectBox(x0,y0,x1,y1, x2,y2,x3,y3)
&& abs(PointSide(x2,y2, x0,y0,x1,y1) + PointSide(x3,y3, x0,y0,x1,y1)) != 2
&& abs(PointSide(x0,y0, x2,y2,x3,y3) + PointSide(x1,y1, x2,y2,x3,y3)) != 2;
}
struct Scaler { int result, bop, fd, ca, cache; };
#define Scaler_Init(a,b,c,d,f) \
{ d + (b-1 - a) * (f-d) / (c-a), ((f<d) ^ (c<a)) ? -1 : 1, \
abs(f-d), abs(c-a), (int)((b-1-a) * abs(f-d)) % abs(c-a) }
// Scaler_Next: Return (b++ - a) * (f-d) / (c-a) + d using the initial values passed to Scaler_Init().
/* Advance the incremental interpolator one step and return the next value
   of (b++ - a) * (f-d) / (c-a) + d for the parameters given to Scaler_Init
   (per the comment above). Bresenham-style: the fractional remainder is
   accumulated in cache and result is stepped by bop on each carry. */
static int Scaler_Next(struct Scaler* i)
{
for(i->cache += i->fd; i->cache >= i->ca; i->cache -= i->ca) i->result += i->bop;
return i->result;
}
#ifdef TextureMapping
# include <sys/mman.h>
# include <unistd.h>
# include <fcntl.h>
# include <sys/stat.h>
# include <errno.h>
static int LoadTexture(void)
{
int initialized = 0;
int fd = open("portrend_textures.bin", O_RDWR | O_CREAT, 0644);
if(lseek(fd, 0, SEEK_END) == 0)
{
InitializeTextures:;
// Initialize by loading textures
#define LoadTexture(filename, name) \
Texture* name = NULL; do { \
FILE* fp = fopen(filename, "rb"); \
if(!fp) perror(filename); else { \
name = malloc(sizeof(*name)); \
fseek(fp, 0x11, SEEK_SET); \
for(unsigned y=0; y<1024; ++y) \
for(unsigned x=0; x<1024; ++x) \
{ \
int r = fgetc(fp), g = fgetc(fp), b = fgetc(fp); \
(*name)[x][y] = r*65536 + g*256 + b; \
} \
fclose(fp); } \
} while(0)
#define UnloadTexture(name) free(name)
Texture dummylightmap;
memset(&dummylightmap, 0, sizeof(dummylightmap));
LoadTexture("wall2.ppm", WallTexture); LoadTexture("wall2_norm.ppm", WallNormal);
LoadTexture("wall3.ppm", WallTexture2); LoadTexture("wall3_norm.ppm", WallNormal2);
LoadTexture("floor2.ppm", FloorTexture); LoadTexture("floor2_norm.ppm", FloorNormal);
LoadTexture("ceil2.ppm", CeilTexture); LoadTexture("ceil2_norm.ppm", CeilNormal);
#define SafeWrite(fd, buf, amount) do { \
const char* source = (const char*)(buf); \
long remain = (amount); \
while(remain > 0) { \
long result = write(fd, source, remain); \
if(result >= 0) { remain -= result; source += result; } \
else if(errno == EAGAIN || errno == EINTR) continue; \
else break; \
} \
if(remain > 0) perror("write"); \
} while(0)
#define PutTextureSet(txtname, normname) do { \
SafeWrite(fd, txtname, sizeof(Texture)); \
SafeWrite(fd, normname, sizeof(Texture)); \
SafeWrite(fd, &dummylightmap, sizeof(Texture)); \
SafeWrite(fd, &dummylightmap, sizeof(Texture)); } while(0)
printf("Initializing textures... ");
lseek(fd, 0, SEEK_SET);
for(unsigned n=0; n<NumSectors; ++n)
{
for(int s=printf("%d/%d", n+1,NumSectors); s--; ) putchar('\b');
fflush(stdout);
PutTextureSet(FloorTexture, FloorNormal);
PutTextureSet(CeilTexture, CeilNormal);
for(unsigned w=0; w<sectors[n].npoints; ++w) PutTextureSet(WallTexture, WallNormal);
for(unsigned w=0; w<sectors[n].npoints; ++w) PutTextureSet(WallTexture2, WallNormal2);
}
ftruncate(fd, lseek(fd, 0, SEEK_CUR));
printf("\n"); fflush(stdout);
UnloadTexture(WallTexture); UnloadTexture(WallNormal);
UnloadTexture(WallTexture2); UnloadTexture(WallNormal2);
UnloadTexture(FloorTexture); UnloadTexture(FloorNormal);
UnloadTexture(CeilTexture); UnloadTexture(CeilNormal);
#undef UnloadTexture
#undef LoadTexture
initialized = 1;
}
off_t filesize = lseek(fd, 0, SEEK_END);
char* texturedata = mmap(NULL, filesize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if(!texturedata) perror("mmap");
printf("Loading textures\n");
off_t pos = 0;
for(unsigned n=0; n<NumSectors; ++n)
{
sectors[n].floortexture = (void*) (texturedata + pos); pos += sizeof(struct TextureSet);
sectors[n].ceiltexture = (void*) (texturedata + pos); pos += sizeof(struct TextureSet);
unsigned w = sectors[n].npoints;
sectors[n].uppertextures = (void*) (texturedata + pos); pos += sizeof(struct TextureSet) * w;
sectors[n].lowertextures = (void*) (texturedata + pos); pos += sizeof(struct TextureSet) * w;
}
printf("done, %llu bytes mmapped out of %llu\n", (unsigned long long)pos, (unsigned long long) filesize);
if(pos != filesize)
{
printf(" -- Wrong filesize! Let's try that again.\n");
munmap(texturedata, filesize);
goto InitializeTextures;
}
return initialized;
}
#ifdef LightMapping
/* 3D vector helpers (float math). vxs (2D cross product) is defined elsewhere
 * in this file. */
/* Euclidean length of a 3D vector. */
#define vlen(x,y,z) sqrtf((x)*(x) + (y)*(y) + (z)*(z))
/* Distance between two 3D points. */
#define vlen2(x0,y0,z0, x1,y1,z1) vlen((x1)-(x0), (y1)-(y0), (z1)-(z0))
/* Dot product of two 3D vectors: the two vectors are passed GROUPED,
 * (x0,y0,z0) then (x1,y1,z1) — not interleaved. */
#define vdot3(x0,y0,z0, x1,y1,z1) ((x0)*(x1) + (y0)*(y1) + (z0)*(z1))
/* Cross product of two 3D vectors, built from the 2D cross helper vxs. */
#define vxs3(x0,y0,z0, x1,y1,z1) (struct xyz){ vxs(y0,z0, y1,z1), vxs(z0,x0, z1,x1), vxs(x0,y0, x1,y1) }
/* Result of a ray cast performed by IntersectRay(): where the ray stopped
 * and what surface it stopped on. */
struct Intersection
{
    // Map coordinates where the hit happened. x,z = map, y = height
    struct xyz where;
    // Information about the surface that was hit
    struct TextureSet* surface;
    struct xyz normal; // Perturbed surface normal
    int sample; // RGB sample from surface texture & lightmap
    int sectorno; // Sector in which the hit happened
};
/* Pack r,g,b (nominally 0..255 each, but possibly out of range) into a
 * single 0xRRGGBB int. Instead of clipping each channel independently,
 * out-of-range channels are pulled towards the luma (desaturated), which
 * preserves perceived brightness better than a plain per-channel clamp.
 * NOTE(review): `luma` is in 1/1000 units (r*299+g*587+b*114) while r,g,b
 * are in 0..255 units; the blend below mixes both scales directly
 * ((r - luma) and "+ luma"). Whether that is intentional cannot be
 * verified from this file — confirm before relying on exact output values.
 */
static int ClampWithDesaturation(int r,int g,int b)
{
    int luma = r*299 + g*587 + b*114; // luma scaled by 1000
    if(luma > 255000) { r=g=b=255; } // brighter than pure white: clip to white
    else if(luma <= 0) { r=g=b=0; } // darker than black: clip to black
    else
    {
        // sat is a saturation factor scaled by 1000; 1000 = unchanged.
        double sat = 1000;
        // For each channel outside 0..255, compute how much the color must
        // be desaturated (pulled towards luma) to bring it into range.
        if(r > 255) sat = min(sat, (luma-255e3) / (luma-r)); else if(r < 0) sat = min(sat, luma / (double)(luma-r));
        if(g > 255) sat = min(sat, (luma-255e3) / (luma-g)); else if(g < 0) sat = min(sat, luma / (double)(luma-g));
        if(b > 255) sat = min(sat, (luma-255e3) / (luma-b)); else if(b < 0) sat = min(sat, luma / (double)(luma-b));
        if(sat != 1.)
        {
            // Blend each channel towards luma by the computed factor,
            // then clamp as a final safety net.
            r = (r - luma) * sat/1e3 + luma; r = clamp(r,0,255);
            g = (g - luma) * sat/1e3 + luma; g = clamp(g,0,255);
            b = (b - luma) * sat/1e3 + luma; b = clamp(b,0,255);
        }
    }
    return r*65536 + g*256 + b;
}
/* Modulate a packed 0xRRGGBB texture sample by a packed 0xRRGGBB lightmap
 * sample and return the resulting packed pixel. The *2/255 scaling treats
 * lightmap value 128 as "full brightness", so values above 128 brighten
 * the texel; overflow is handled by ClampWithDesaturation. */
static int ApplyLight(int texture, int light)
{
    /* Split both packed pixels into their channels. */
    int tex_r = (texture >> 16) & 0xFF, lit_r = (light >> 16) & 0xFF;
    int tex_g = (texture >>  8) & 0xFF, lit_g = (light >>  8) & 0xFF;
    int tex_b = (texture >>  0) & 0xFF, lit_b = (light >>  0) & 0xFF;
    /* Per-channel modulation. */
    int out_r = tex_r * lit_r * 2 / 255;
    int out_g = tex_g * lit_g * 2 / 255;
    int out_b = tex_b * lit_b * 2 / 255;
#if 1
    return ClampWithDesaturation(out_r, out_g, out_b);
#else
    return clamp(tex_r*lit_r / 255,0,255)*65536
         + clamp(tex_g*lit_g / 255,0,255)*256
         + clamp(tex_b*lit_b / 255,0,255);
#endif
}
/* Overwrite *target with the packed-RGB form of `color` (channels held in
 * xyz.x/y/z), clamping with desaturation. */
static void PutColor(int* target, struct xyz color)
{
    *target = ClampWithDesaturation((int)color.x, (int)color.y, (int)color.z);
}
/* Add `color` (channels in xyz.x/y/z) to the packed pixel in *target and
 * repack it, clamping with desaturation. */
static void AddColor(int* target, struct xyz color)
{
    int prev = *target;
    int r = ((prev >> 16) & 0xFF) + color.x;
    int g = ((prev >>  8) & 0xFF) + color.y;
    int b = ((prev >>  0) & 0xFF) + color.z;
    *target = ClampWithDesaturation(r, g, b);
}
static struct xyz PerturbNormal(struct xyz normal,
struct xyz tangent,
struct xyz bitangent,
int normal_sample)
{
struct xyz perturb = { ((normal_sample >> 16) & 0xFF) / 127.5f - 1.f,
((normal_sample >> 8) & 0xFF) / 127.5f - 1.f,
((normal_sample >> 0) & 0xFF) / 127.5f - 1.f};
// TODO: Verify whether this calculation is correct
return (struct xyz) { normal.x * perturb.z + bitangent.x * perturb.y + tangent.x * perturb.x,
normal.y * perturb.z + bitangent.y * perturb.y + tangent.y * perturb.x,
normal.z * perturb.z + bitangent.z * perturb.y + tangent.z * perturb.x };
}
static void GetSectorBoundingBox(int sectorno, struct xy* bounding_min, struct xy* bounding_max)
{
const struct sector* sect = §ors[sectorno];
for(int s = 0; s < sect->npoints; ++s)
{
bounding_min->x = min(bounding_min->x, sect->vertex[s].x);
bounding_min->y = min(bounding_min->y, sect->vertex[s].y);
bounding_max->x = max(bounding_max->x, sect->vertex[s].x);
bounding_max->y = max(bounding_max->y, sect->vertex[s].y);
}
}
// Return values:
//   0 = clear path, nothing hit
//   1 = hit, *result indicates where it hit
//   2 = your princess is in another castle (a direct path doesn't lead to this sector)
// Casts a ray from `origin` (inside sector `origin_sectorno`) towards
// `target`, walking from sector to sector through portal holes. On a hit
// (wall, floor or ceiling) *result is filled with the position, surface,
// perturbed normal and lit texture sample.
// BUGFIX: restored `&sectors[...]` / `&sect->...` — the address-of had been
// garbled into the HTML entity "&sect;" (`§`), which is not valid C.
static int IntersectRay(struct xyz origin, int origin_sectorno,
                        struct xyz target, int target_sectorno,
                        struct Intersection* result)
{
    unsigned n_rescan=0;    // counts sector transitions (diagnostic only)
    int prev_sectorno=-1;   // sector we just came from, to avoid ping-ponging
rescan:;
    ++n_rescan;
    struct sector* sect = &sectors[origin_sectorno];
    // Check if this beam hits one of the sector's edges.
    unsigned u=0, v=0, lu=0, lv=0;   // texture & lightmap coordinates of the hit
    struct xyz tangent, bitangent;   // tangent frame of the hit surface
    for(int s = 0; s < sect->npoints; ++s)
    {
        float vx1 = sect->vertex[s+0].x, vy1 = sect->vertex[s+0].y;
        float vx2 = sect->vertex[s+1].x, vy2 = sect->vertex[s+1].y;
        if(!IntersectLineSegments(origin.x,origin.z, target.x,target.z, vx1,vy1, vx2,vy2)/*
        || PointSide(target.x,target.z, vx1,vy1, vx2,vy2) >= 0*/)
            continue;
        // Determine the X & Z coordinates of the wall hit.
        struct xy hit = Intersect(origin.x,origin.z, target.x,target.z, vx1,vy1, vx2,vy2);
        float x = hit.x;
        float z = hit.y;
        // Also determine the Y coordinate, interpolating along the dominant
        // horizontal axis. (BUGFIX: fabsf — these are float deltas; integer
        // abs() would truncate them before comparing.)
        float y = origin.y + ((fabsf(target.x-origin.x) > fabsf(target.z-origin.z))
            ? ((x - origin.x) * (target.y - origin.y) / (target.x - origin.x))
            : ((z - origin.z) * (target.y - origin.y) / (target.z - origin.z)) );
        /* Check where the hole (portal opening) is. */
        float hole_low = 9e9, hole_high = -9e9;  // no hole unless a neighbor exists
        if(sect->neighbors[s] >= 0)
        {
            hole_low = max( sect->floor, sectors[sect->neighbors[s]].floor );
            hole_high = min( sect->ceil, sectors[sect->neighbors[s]].ceil );
        }
        if(y >= hole_low && y <= hole_high)
        {
            // The ray passes through the portal: continue in the neighbor,
            // nudging the origin slightly forward to avoid re-hitting the edge.
            origin_sectorno = sect->neighbors[s];
            origin.x = x + (target.x - origin.x)*1e-2;
            origin.y = y + (target.y - origin.y)*1e-2;
            origin.z = z + (target.z - origin.z)*1e-2;
            float distance = vlen(target.x-origin.x, target.y-origin.y, target.z-origin.z);
            if(origin_sectorno == prev_sectorno)
            {
                // Disregard this boundary.
                continue;
            }
            if(distance < 1e-3f || origin_sectorno == prev_sectorno)
            {
                // Close enough. (The second condition is unreachable after
                // the `continue` above; kept as in the original.)
                goto close_enough;
            }
            prev_sectorno = origin_sectorno;
            goto rescan;
        }
        // It hit the wall.
        // Did it hit the sector's floor first?
        if(y < sect->floor) goto hit_floor;
        if(y > sect->ceil) goto hit_ceil;
        // Nope. It hit the wall.
        result->where = (struct xyz) { x,y,z };
        result->surface = (y < hole_low) ? &sect->lowertextures[s] : &sect->uppertextures[s];
        result->sectorno = origin_sectorno;
        // Geometric wall normal (in the XZ plane) and tangent frame.
        float nx = vy2-vy1, nz = vx1-vx2, len = sqrtf(nx*nx + nz*nz);
        result->normal = (struct xyz){ nx/len, 0, nz/len };
        nx = vx2-vx1; nz = vy2-vy1; len = sqrtf(nx*nx + nz*nz);
        tangent = (struct xyz){ nx/len, 0, nz/len };
        bitangent = (struct xyz) { 0,1,0};
        // Calculate the texture coordinates.
        float dx = vx2-vx1;
        float dy = vy2-vy1;
        v = (unsigned)((y - sect->floor) * 1024.f / (sect->ceil - sect->floor)) % 1024u;
        // BUGFIX: fabsf here too — dx,dy are floats.
        u = (fabsf(dx) > fabsf(dy) ? (unsigned)((x - vx1) * 1024 / dx)
                                   : (unsigned)((z - vy1) * 1024 / dy)) % 1024u;
        // Lightmap coordinates are the same as texture coordinates.
        lu = u;
        lv = v;
    perturb_normal:;
        // Sample the surface maps and produce the final lit color & normal.
        int texture_sample = result->surface->texture[v][u];
        int normal_sample = result->surface->normalmap[v][u];
        int light_sample = result->surface->lightmap[lv][lu];
        result->sample = ApplyLight(texture_sample, light_sample);
        result->normal = PerturbNormal(result->normal, tangent, bitangent, normal_sample);
        return 1;
    }
    if(target.y > sect->ceil)
    {
    hit_ceil:
        result->where.y = sect->ceil;
        result->surface = sect->ceiltexture;
        result->normal = (struct xyz){0,-1,0};
        tangent = (struct xyz){1,0,0};
        bitangent = vxs3(result->normal.x,result->normal.y,result->normal.z, tangent.x,tangent.y,tangent.z);
    hit_ceil_or_floor:
        // Either the floor or ceiling was hit. Determine the X & Z coordinates.
        result->where.x = (result->where.y - origin.y) * (target.x - origin.x) / (target.y - origin.y) + origin.x;
        result->where.z = (result->where.y - origin.y) * (target.z - origin.z) / (target.y - origin.y) + origin.z;
        result->sectorno = origin_sectorno; // BUGFIX: was left unset on floor/ceiling hits
        // Calculate the texture coordinates.
        u = ((unsigned)(result->where.x * 256)) % 1024u;
        v = ((unsigned)(result->where.z * 256)) % 1024u;
        // Calculate the lightmap coordinates from the sector's bounding box.
        struct xy bounding_min = {1e9f, 1e9f}, bounding_max = {-1e9f, -1e9f};
        GetSectorBoundingBox(origin_sectorno, &bounding_min, &bounding_max);
        // NOTE(review): lv is derived from where.y (the height, constant per
        // floor/ceiling), not where.z — confirm whether where.z was intended.
        lu = ((unsigned)((result->where.x - bounding_min.x) * 1024 / (bounding_max.x - bounding_min.x))) % 1024;
        lv = ((unsigned)((result->where.y - bounding_min.y) * 1024 / (bounding_max.y - bounding_min.y))) % 1024;
        goto perturb_normal;
    }
    if(target.y < sect->floor)
    {
    hit_floor:
        result->where.y = sect->floor;
        result->surface = sect->floortexture;
        result->normal = (struct xyz){0, 1,0};
        tangent = (struct xyz){-1,0,0};
        bitangent = vxs3(result->normal.x,result->normal.y,result->normal.z, tangent.x,tangent.y,tangent.z);
        goto hit_ceil_or_floor;
    }
close_enough:;
    // Nothing hit along the way. Is the target in this sector?
    return origin_sectorno == target_sectorno ? 0 : 2;
}
/* Tuning knobs for the lightmap precalculation. */
#define narealightcomponents 32 //512;//64
#define area_light_radius 0.4
#define nrandomvectors 128 // 8192
/* firstround > 1 resumes from previously computed diffuse-only lightmaps. */
#define firstround 1
#define maxrounds 100
/* Distance falloff: contribution ~ 1 / (1 + (len/fade_distance)^2). */
#define fade_distance_diffuse 10.0
#define fade_distance_radiosity 10.0
#define radiomul 1.0
/* Random unit directions used by the radiosity bounce pass. */
static struct xyz tvec[nrandomvectors];
/* Jitter offsets forming a spherical cloud around each light (soft shadows). */
static struct xyz avec[narealightcomponents];
/* Computes the direct (diffuse) light arriving at one surface point and
 * stores it into texture->lightmap[lx][ly] (overwriting, via PutColor).
 * (tx,ty) index the normal-map sample for the point; point_in_wall is the
 * world-space position; sectorno is the sector containing the point.
 * NOTE(review): the normal map is indexed [tx][ty] here but [v][u] in
 * IntersectRay, and the lightmap [lx][ly] here but [lv][lu] there —
 * confirm the intended row/column order is consistent.
 */
static void DiffuseLightCalculation(struct xyz normal, struct xyz tangent, struct xyz bitangent,
                                    struct TextureSet* texture,
                                    unsigned tx, unsigned ty,
                                    unsigned lx, unsigned ly,
                                    struct xyz point_in_wall, unsigned sectorno)
{
    struct xyz perturbed_normal = PerturbNormal(normal,tangent,bitangent,
                                                texture->normalmap[tx][ty]);
    // For each lightsource, check if there is an obstacle
    // in between this vertex and the lightsource. Calculate
    // the ambient light levels from the fact.
    // This simulates diffuse light.
    struct xyz color = {0,0,0};
    for(unsigned l=0; l<NumLights; ++l)
    {
        const struct light* light = &lights[l];
        // Nudge the ray start off the surface to avoid self-intersection.
        struct xyz source = { point_in_wall.x + normal.x * 1e-5f,
                              point_in_wall.y + normal.y * 1e-5f,
                              point_in_wall.z + normal.z * 1e-5f };
        // Sample the light as a cloud of jittered positions (soft shadows).
        for(unsigned qa=0; qa<narealightcomponents; ++qa)
        {
            struct xyz target = { light->where.x + avec[qa].x, light->where.y + avec[qa].y, light->where.z + avec[qa].z };
            struct xyz towards = { target.x-source.x, target.y-source.y, target.z-source.z };
            float len = vlen(towards.x, towards.y, towards.z), invlen = 1.0f / len;
            towards.x *= invlen;
            towards.y *= invlen;
            towards.z *= invlen;
            // Lambertian cosine term times distance falloff, averaged over
            // the area-light components.
            float cosine = vdot3(perturbed_normal.x,perturbed_normal.y,perturbed_normal.z, towards.x,towards.y,towards.z);
            //if(cosine > 1) fprintf(stderr, "cosine = %.3f\n", cosine);
            float power = cosine / (1.f + powf(len / fade_distance_diffuse, 2.0f));
            power /= (float) narealightcomponents;
            if(power > 1e-7f)
            {
                struct Intersection i;
                // Only add the contribution if the path to the light is clear.
                if(IntersectRay(source, sectorno, target, light->sector, &i) == 0)
                {
                    color.x += light->light.x * power;
                    color.y += light->light.y * power;
                    color.z += light->light.z * power;
    } } } }
    PutColor(&texture->lightmap[lx][ly], color);
}
/* Adds one bounce of indirect light (radiosity) to texture->lightmap[lx][ly]
 * for one surface point: shoots nrandomvectors random rays away from the
 * surface and gathers the lit color sampled wherever each ray lands.
 * (tx,ty) index the normal-map sample; point_in_wall is the world position.
 */
static void RadiosityCalculation(struct xyz normal, struct xyz tangent, struct xyz bitangent,
                                 struct TextureSet* texture,
                                 unsigned tx, unsigned ty,
                                 unsigned lx, unsigned ly,
                                 struct xyz point_in_wall, unsigned sectorno)
{
    struct xyz perturbed_normal = PerturbNormal(normal,tangent,bitangent,
                                                texture->normalmap[tx][ty]);
    // Shoot rays to each random direction and see what it hits.
    // Take the last round's light value from that location.
    struct xyz source = { point_in_wall.x + normal.x * 1e-3f,
                          point_in_wall.y + normal.y * 1e-3f,
                          point_in_wall.z + normal.z * 1e-3f };
    float basepower = radiomul / nrandomvectors;
    // Apply the set of random vectors to this surface.
    // This produces a set of vectors all pointing away
    // from the wall to random directions.
    struct xyz color = {0,0,0};
    for(unsigned qq=0; qq<nrandomvectors; ++qq)
    {
        struct xyz rvec = tvec[qq];
        // If the random vector points to the wrong side from the wall, flip it
        if(vdot3(rvec.x, rvec.y, rvec.z, normal.x, normal.y, normal.z) < 0)
        {
            rvec.x = -rvec.x;
            rvec.y = -rvec.y;
            rvec.z = -rvec.z;
        }
        struct xyz target = { source.x + rvec.x * 512.f,
                              source.y + rvec.y * 512.f,
                              source.z + rvec.z * 512.f };
        struct Intersection i;
        if(IntersectRay(source, sectorno, target, -1 /* no particular sector */, &i) == 1) // hit something
        {
            /* BUGFIX: vdot3(x0,y0,z0, x1,y1,z1) takes its two vectors
               GROUPED; the old call interleaved the components, computing
               pn.x*n.y + n.x*pn.z + pn.y*n.z instead of the dot product. */
            float cosine = vdot3(perturbed_normal.x, perturbed_normal.y, perturbed_normal.z,
                                 i.normal.x, i.normal.y, i.normal.z) * basepower;
            float len = vlen(i.where.x-source.x, i.where.y-source.y, i.where.z-source.z);
            /* BUGFIX: fabsf — cosine is a float smaller than 1 in magnitude;
               integer abs() truncated it to 0, zeroing the contribution. */
            float power = fabsf(cosine) / (1.f + powf(len / fade_distance_radiosity, 2.0f));
            color.x += ((i.sample >> 16) & 0xFF) * power;
            color.y += ((i.sample >> 8) & 0xFF) * power;
            color.z += ((i.sample >> 0) & 0xFF) * power;
    } }
    AddColor(&texture->lightmap[lx][ly], color);
}
/* Reset the working lightmap to the diffuse-only baseline before this
 * round's radiosity contributions are accumulated on top of it. */
static void Begin_Radiosity(struct TextureSet* set)
{
    memcpy(&set->lightmap, &set->lightmap_diffuseonly, sizeof(Texture));
}
/* Compare the freshly computed lightmap against the snapshot taken at
 * Begin_Radiosity time, print and return the mean absolute per-pixel
 * channel difference (a convergence measure for the radiosity rounds). */
static double End_Radiosity(struct TextureSet* set, const char* label)
{
    long total = 0;
    for(unsigned x=0; x<1024; ++x)
        for(unsigned y=0; y<1024; ++y)
        {
            int before = set->lightmap_diffuseonly[x][y];
            int after  = set->lightmap[x][y];
            int dr = ((before >> 16) & 0xFF) - ((after >> 16) & 0xFF);
            int dg = ((before >>  8) & 0xFF) - ((after >>  8) & 0xFF);
            int db = ((before      ) & 0xFF) - ((after      ) & 0xFF);
            total += abs(dr) + abs(dg) + abs(db);
        }
    double mean = total / (double)(1024*1024);
    fprintf(stderr, "Differences in %s: %g\33[K\n", label, mean);
    return mean;
}
/* Snapshot the just-computed diffuse lighting; radiosity rounds later use
 * this as their baseline (see Begin_Radiosity / End_Radiosity). */
static void End_Diffuse(struct TextureSet* set)
{
    memcpy(&set->lightmap_diffuseonly, &set->lightmap, sizeof(Texture));
}
#ifdef _OPENMP
# include <omp.h>
/* OMP_SCALER_LOOP_BEGIN(a,b,c,d,e,f): iterates integer `b` over [a,c),
 * giving each OpenMP thread one contiguous chunk of the range, and exposes
 * `e`, a float interpolated from d to f across the whole range using the
 * fixed-point Scaler (values scaled by 32768). Must be placed inside a
 * `#pragma omp parallel` region and closed with OMP_SCALER_LOOP_END(). */
#define OMP_SCALER_LOOP_BEGIN(a,b,c,d,e,f) do { \
int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads(); \
int my_start = (this_thread ) * ((c)-(a)) / num_threads + (a); \
int my_end = (this_thread+1) * ((c)-(a)) / num_threads + (a); \
struct Scaler e##int = Scaler_Init(a, my_start, (c)-1, (d) * 32768, (f) * 32768); \
for(int b = my_start; b < my_end; ++b) \
{ \
float e = Scaler_Next(&e##int) / 32768.f;
#else
/* Serial fallback: identical interface, plain loop over the whole range. */
#define OMP_SCALER_LOOP_BEGIN(a,b,c,d,e,f) do { \
struct Scaler e##int = Scaler_Init(a, a, (c)-1, (d) * 32768, (f) * 32768); \
for(int b = (a); b < (c); ++b) \
{ \
float e = Scaler_Next(&e##int) / 32768.f;
#endif
/* Closes the loop body and the do/while wrapper opened by ..._BEGIN. */
#define OMP_SCALER_LOOP_END() \
} } while(0)
/* My lightmap calculation involves some raytracing.
* There are faster ways to do it, but this is the only way I know how to do it in software.
*/
static void BuildLightmaps(void)
{
for(unsigned round=firstround; round<=maxrounds; ++round)
{
fprintf(stderr, "Lighting calculation, round %u...\n", round);
#ifndef _OPENMP
fprintf(stderr, "Note: This would probably go faster if you enabled OpenMP in your compiler options. It's -fopenmp in GCC and Clang.\n");
#endif
// Create uniformly distributed random unit vectors
for(unsigned n=0; n<nrandomvectors; ++n)
{
double u = (rand() % 1000000) / 1e6; // 0..1
double v = (rand() % 1000000) / 1e6; // 0..1
double theta = 2*3.141592653*u;
double phi = acos(2*v-1);
tvec[n].x = cos(theta) * sin(phi);
tvec[n].y = sin(theta) * sin(phi);
tvec[n].z = cos(phi);
}
// A lightsource is represented by a spherical cloud
// of smaller lightsources around the actual lightsource.
// This achieves smooth edges for the shadows.
#define drand(m) ((rand()%1000-500)*5e-2*m)
for(unsigned qa=0; qa<narealightcomponents; ++qa)
{
double len;
do {
avec[qa] = (struct xyz){ drand(100.0), drand(100.0), drand(100.0) };
len = sqrt(avec[qa].x*avec[qa].x + avec[qa].y*avec[qa].y + avec[qa].z*avec[qa].z);
} while(len < 1e-3);
avec[qa].x *= area_light_radius/len;
avec[qa].y *= area_light_radius/len;
avec[qa].z *= area_light_radius/len;
}
#undef drand
fprintf(stderr, "Note: You can interrupt this program at any time you want. If you wish to resume\n"
" the lightmap calculation at a later date, use the --rebuild commandline option.\n"
" If you have already finished round 1 (diffuse light), and don't wish to do that\n"
" again, change the '#define firstround' value to your liking. Value 1 means\n"
" it starts from beginning, and any value from 2-100 (actual value is not important)\n"
" means to progressively improve the radiosity (cumulative). The current value is %d.\n",
firstround);
double total_differences = 0;
for(unsigned sectorno=0; sectorno<NumSectors; ++sectorno)
{
struct sector* const sect = §ors[sectorno];
const struct xy* const vert = sect->vertex;
double sector_differences = 0;
if(1) // Do ceiling and floor
{
struct xy bounding_min = {1e9f, 1e9f}, bounding_max = {-1e9f, -1e9f};
GetSectorBoundingBox(sectorno, &bounding_min, &bounding_max);
struct xyz floornormal = (struct xyz){0, 1, 0}; // floor
struct xyz floortangent = (struct xyz){1, 0, 0};
struct xyz floorbitangent = vxs3(floornormal.x,floornormal.y,floornormal.z, floortangent.x,floortangent.y,floortangent.z);
struct xyz ceilnormal = (struct xyz){0,-1, 0}; // ceiling
struct xyz ceiltangent = (struct xyz){1, 0, 0};
struct xyz ceilbitangent = vxs3(ceilnormal.x,ceilnormal.y,ceilnormal.z, ceiltangent.x,ceiltangent.y,ceiltangent.z);
fprintf(stderr, "Bounding box for sector %d/%d: %g,%g - %g,%g\n",
sectorno+1,NumSectors, bounding_min.x,bounding_min.y, bounding_max.x,bounding_max.y);
// Round 1: Check lightsources
if(round == 1)
{
struct Scaler txtx_int = Scaler_Init(0,0,1023, bounding_min.x*32768, bounding_max.x*32768);
for(unsigned x=0; x<1024; ++x)
{
fprintf(stderr, "- Sector %d ceils&floors, %u/%u diffuse light...\r", sectorno+1, x,1024);
float txtx = Scaler_Next(&txtx_int)/32768.f;
// For better cache locality, first do floors and then ceils
#pragma omp parallel
OMP_SCALER_LOOP_BEGIN(0,y,1024, bounding_min.y, txty, bounding_max.y);
DiffuseLightCalculation(floornormal, floortangent, floorbitangent, sect->floortexture,
((unsigned)(txtx*256)) % 1024, ((unsigned)(txty*256)) % 1024,
x,y,
(struct xyz){txtx, sect->floor, txty}, sectorno);
OMP_SCALER_LOOP_END();
#pragma omp parallel
OMP_SCALER_LOOP_BEGIN(0,y,1024, bounding_min.y, txty, bounding_max.y);
DiffuseLightCalculation(ceilnormal, ceiltangent, ceilbitangent, sect->ceiltexture,
((unsigned)(txtx*256)) % 1024, ((unsigned)(txty*256)) % 1024,
x,y,
(struct xyz){txtx, sect->ceil, txty}, sectorno);
OMP_SCALER_LOOP_END();
}
fprintf(stderr, "\n");
End_Diffuse(sect->floortexture);
End_Diffuse(sect->ceiltexture);
}
else
{
// Round 2+: Radiosity
Begin_Radiosity(sect->floortexture);
Begin_Radiosity(sect->ceiltexture);
// Calculate radiosity in decreased resolution
struct Scaler txtx_int = Scaler_Init(0,0,1023, bounding_min.x*32768, bounding_max.x*32768);
for(unsigned x=0; x<1024; ++x)
{
float txtx = Scaler_Next(&txtx_int)/32768.f;
fprintf(stderr, "- Sector %u ceils&floors, %u/%u radiosity...\r", sectorno+1, x,1024);
#pragma omp parallel
OMP_SCALER_LOOP_BEGIN(0,y,1024, bounding_min.y, txty, bounding_max.y);
RadiosityCalculation(floornormal, floortangent, floorbitangent, sect->ceiltexture,
((unsigned)(txtx*256)) % 1024, ((unsigned)(txty*256)) % 1024, x,y,
(struct xyz){txtx, sect->floor, txty}, sectorno);
OMP_SCALER_LOOP_END();
#pragma omp parallel
OMP_SCALER_LOOP_BEGIN(0,y,1024, bounding_min.y, txty, bounding_max.y);
RadiosityCalculation(ceilnormal, ceiltangent, ceilbitangent, sect->ceiltexture,
((unsigned)(txtx*256)) % 1024, ((unsigned)(txty*256)) % 1024, x,y,
(struct xyz){txtx, sect->ceil, txty}, sectorno);
OMP_SCALER_LOOP_END();
}
char Buf[128];
sprintf(Buf, "Sector %u floors", sectorno+1); sector_differences += End_Radiosity(sect->floortexture, Buf);
sprintf(Buf, "Sector %u ceils", sectorno+1); sector_differences += End_Radiosity(sect->ceiltexture, Buf);
}
}
if(1)for(unsigned s=0; s<sect->npoints; ++s)
{
float xd = vert[s+1].x - vert[s].x;
float zd = vert[s+1].y - vert[s].y;
float len = vlen(xd,zd,0);
struct xyz normal = {-zd/len, 0, xd/len};
struct xyz tangent = {xd/len, 0, zd/len};
struct xyz bitangent = {0,1,0};
float hole_low = 9e9, hole_high = -9e9;
if(sect->neighbors[s] >= 0)
{
hole_low = max( sect->floor, sectors[sect->neighbors[s]].floor );
hole_high = min( sect->ceil, sectors[sect->neighbors[s]].ceil );
}
if(round == 1)
{
// Round 1: Check lightsources
struct Scaler txtx_int = Scaler_Init(0,0,1023, vert[s].x*32768,vert[s+1].x*32768);
struct Scaler txtz_int = Scaler_Init(0,0,1023, vert[s].y*32768,vert[s+1].y*32768);
for(unsigned x=0; x<1024; ++x)
{
float txtx = Scaler_Next(&txtx_int)/32768.f;
float txtz = Scaler_Next(&txtz_int)/32768.f;
fprintf(stderr, "- Sector %u Wall %u/%u %u/%u diffuse light...\r", sectorno+1, s+1, sect->npoints, x,1024);
#pragma omp parallel
OMP_SCALER_LOOP_BEGIN(0,y,1024, sect->ceil, txty, sect->floor);
struct TextureSet* texture = §->uppertextures[s];
if(sect->neighbors[s] >= 0 && txty < hole_high)
{
if(txty > hole_low) continue;
texture = §->lowertextures[s];
}
struct xyz point_in_wall = { txtx, txty, txtz };
DiffuseLightCalculation(normal, tangent, bitangent, texture, x,y, x,y,
point_in_wall, sectorno);
OMP_SCALER_LOOP_END();
}
End_Diffuse(§->uppertextures[s]);
End_Diffuse(§->lowertextures[s]);
}
else
{
Begin_Radiosity(§->uppertextures[s]);
Begin_Radiosity(§->lowertextures[s]);
// Round 2+: Radiosity
struct Scaler txtx_int = Scaler_Init(0,0,1023, vert[s].x*32768,vert[s+1].x*32768);
struct Scaler txtz_int = Scaler_Init(0,0,1023, vert[s].y*32768,vert[s+1].y*32768);
for(unsigned x=0; x<1024; ++x)
{
float txtx = Scaler_Next(&txtx_int)/32768.f;
float txtz = Scaler_Next(&txtz_int)/32768.f;
fprintf(stderr, "- Sector %u Wall %u/%u %u/%u radiosity...\r", sectorno+1, s+1, sect->npoints, x,1024);
#pragma omp parallel
OMP_SCALER_LOOP_BEGIN(0,y,1024, sect->ceil, txty, sect->floor);
struct TextureSet* texture = §->uppertextures[s];
if(sect->neighbors[s] >= 0 && txty < hole_high)
{
if(txty > hole_low) continue;
texture = §->lowertextures[s];
}
struct xyz point_in_wall = { txtx, txty, txtz };
RadiosityCalculation(normal, tangent, bitangent, texture, x,y, x,y, point_in_wall, sectorno);
OMP_SCALER_LOOP_END();
}
char Buf[128];
sprintf(Buf, "Sector %u wall %u lower texture", sectorno+1, s+1); sector_differences += End_Radiosity(§->uppertextures[s], Buf);
sprintf(Buf, "Sector %u wall %u upper texture", sectorno+1, s+1); sector_differences += End_Radiosity(§->lowertextures[s], Buf);
}
fprintf(stderr, "\n");
}
fprintf(stderr, "Round %u differences in sector %u: %g\n", round, sectorno+1, sector_differences);
total_differences += sector_differences;
}
fprintf(stderr, "Round %u differences total: %g.\n", round, total_differences);
if(total_differences < 1e-6)
{
break;
}
}
}
#endif
#endif
// Helper function for the antialiased line algorithm.
/* fpart(x): fractional part of x; rfpart(x): its complement. Used as
 * pixel-coverage weights by the antialiased line drawer. Note the
 * special-cased negative branch. */
#define fpart(x) ((x) < 0 ? 1 - ((x) - floorf(x)) : (x) - floorf(x))
#define rfpart(x) (1 - fpart(x))
/* Blend a pixel of `color` (packed 0xRRGGBB) into the global surface at
 * (x,y) with the given coverage. Coordinates must already be in range. */
static void plot(int x,int y, float opacity, int color)
{
    /* Gamma-correct the coverage for a nonlinear display. */
    float a = powf(opacity, 1/2.2f);
    int *pix = ((int*) surface->pixels) + y * W2 + x;
    int old_r = (*pix >> 16) & 0xFF, new_r = (color >> 16) & 0xFF;
    int old_g = (*pix >>  8) & 0xFF, new_g = (color >>  8) & 0xFF;
    int old_b = (*pix >>  0) & 0xFF, new_b = (color >>  0) & 0xFF;
    /* Per-channel max blend: drawing never darkens what is already there. */
    int r = max(old_r, a*new_r);
    int g = max(old_g, a*new_g);
    int b = max(old_b, a*new_b);
    *pix = (r << 16) | (g << 8) | b;
}
/* Draws an antialiased line using Xiaolin Wu's algorithm (per Wikipedia),
 * plotting into the global surface via plot(). Coordinates must already be
 * within the screen; color is packed 0xRRGGBB.
 * NOTE(review): a zero-length segment makes dx == 0 and gradient non-finite
 * after the steep swap — callers are assumed to avoid that case. */
static void line(float x0,float y0, float x1,float y1, int color)
{
    // Xiaolin Wu's antialiased line algorithm from Wikipedia.
    int steep = fabsf(y1-y0) > fabsf(x1-x0);
    if(steep) { float tmp; tmp=x0; x0=y0; y0=tmp; tmp=x1; x1=y1; y1=tmp; }
    if(x0 > x1) { float tmp; tmp=x0; x0=x1; x1=tmp; tmp=y0; y0=y1; y1=tmp; }
    float dx = x1-x0, dy = y1-y0, gradient = dy/dx;
    // handle first endpoint
    int xend = (int)(x0 + 0.5f);
    // BUGFIX: yend must be a float. It was declared int, which made
    // fpart(yend) constantly 0 and rfpart(yend) constantly 1, defeating
    // the antialiasing of both endpoints.
    float yend = y0 + gradient * (xend - x0);
    float xgap = rfpart(x0 + 0.5f);
    int xpxl1 = xend; // this will be used in the main loop
    int ypxl1 = (int)(yend);
    if(steep)
    {
        plot(ypxl1, xpxl1, rfpart(yend) * xgap, color);
        plot(ypxl1+1, xpxl1, fpart(yend) * xgap, color);
    }
    else
    {
        plot(xpxl1, ypxl1 , rfpart(yend) * xgap, color);
        plot(xpxl1, ypxl1+1, fpart(yend) * xgap, color);
    }
    float intery = yend + gradient; // first y-intersection for the main loop
    // handle second endpoint
    xend = (int)(x1 + 0.5f);
    yend = y1 + gradient * (xend - x1);
    xgap = fpart(x1 + 0.5);
    int xpxl2 = xend; //this will be used in the main loop
    int ypxl2 = (int)(yend);
    if(steep)
    {
        plot(ypxl2 , xpxl2, rfpart(yend) * xgap, color);
        plot(ypxl2+1, xpxl2, fpart(yend) * xgap, color);
    }
    else
    {
        plot(xpxl2, ypxl2, rfpart(yend) * xgap, color);
        plot(xpxl2, ypxl2+1, fpart(yend) * xgap, color);
    }
    // main loop
    for(int x = xpxl1 + 1; x < xpxl2; ++x, intery += gradient)
        if(steep)
        {
            plot((int)(intery) , x, rfpart(intery), color);
            plot((int)(intery)+1, x, fpart(intery), color);
        }
        else
        {
            plot(x, (int)(intery), rfpart(intery), color);
            plot(x, (int)(intery)+1, fpart(intery), color);
        }
}
// BloomPostprocess adds some bloom to the 2D map image. It is merely a cosmetic device.
static void BloomPostprocess(void)
{
    // Blur kernel radius scales with the screen resolution.
    const int blur_width = W/120, blur_height = H/90;
    // Gaussian-shaped kernel (VLA), globally scaled by 0.3.
    float blur_kernel[blur_height*2+1][blur_width*2+1];
    for(int y=-blur_height; y<=blur_height; ++y)
    {
        for(int x=-blur_width; x<=blur_width; ++x)
        {
            float value = expf(-(x*x+y*y) / (2.f*(0.5f*max(blur_width,blur_height))));
            blur_kernel[y+blur_height][x+blur_width] = value * 0.3f;
            //printf("%10.3f", value);
        }
        //printf("\n");
    }
    // Static scratch buffers: one copy of the original frame, one unpacked
    // float image with a per-pixel bloom brightness.
    static int pixels_original[W2*H];
    static struct pixel { float r,g,b,brightness; } img[W2*H];
    memcpy(pixels_original, surface->pixels, sizeof(pixels_original));
    int *pix = ((int*) surface->pixels);
    for(unsigned y=0; y<H; ++y)
        for(unsigned x=0; x<W2; ++x)
        {
            int original_pixel = pixels_original[y*W2+x];
            float r = (original_pixel >> 16) & 0xFF;
            float g = (original_pixel >> 8) & 0xFF;
            float b = (original_pixel >> 0) & 0xFF;
            // Hand-tuned glow strengths for specific map colors
            // (white walls, green vertices, orange portals); everything
            // else glows faintly.
            float wanted_br = original_pixel == 0xFFFFFF ? 1
                            : original_pixel == 0x55FF55 ? 0.6
                            : original_pixel == 0xFFAA55 ? 1
                            : 0.1;
            // Mix a gamma-curved luma, the wanted glow, and the peak channel.
            float brightness = powf((r*0.299f + g*0.587f + b*0.114f) / 255.f, 12.f / 2.2f);
            brightness = (brightness*0.2f + wanted_br * 0.3f + max(max(r,g),b)*0.5f/255.f);
            img[y*W2+x] = (struct pixel) { r,g,b,brightness };
        }
    // Convolve: each output pixel gathers brightness-weighted color from its
    // blurred neighborhood. (The ifdef picks which half of the split screen
    // holds the 2D map; collapse(2) still sees a perfectly nested loop pair.)
    #pragma omp parallel for schedule(static) collapse(2)
    for(unsigned y=0; y<H; ++y)
#ifdef SplitScreen
        for(unsigned x=W; x<W2; ++x)
#else
        for(unsigned x=0; x<W; ++x)
#endif
        {
            int ypmin = max(0, (int)y - blur_height), ypmax = min(H-1, (int)y + blur_height);
            int xpmin = max(0, (int)x - blur_width), xpmax = min(W-1, (int)x + blur_width);
            float rsum = img[y*W2+x].r;
            float gsum = img[y*W2+x].g;
            float bsum = img[y*W2+x].b;
            for(int yp = ypmin; yp <= ypmax; ++yp)
                for(int xp = xpmin; xp <= xpmax; ++xp)
                {
                    float r = img[yp*W2+xp].r;
                    float g = img[yp*W2+xp].g;
                    float b = img[yp*W2+xp].b;
                    float brightness = img[yp*W2+xp].brightness;
                    float value = brightness * blur_kernel[yp+blur_height-(int)y][xp+blur_width-(int)x];
                    rsum += r * value;
                    gsum += g * value;
                    bsum += b * value;
                }
            int color = (((int)clamp(rsum,0,255)) << 16)
                      + (((int)clamp(gsum,0,255)) << 8)
                      + (((int)clamp(bsum,0,255)) << 0);
            pix[y*W2+x] = color;
        }
}
/* fillpolygon draws a filled polygon -- used only in the 2D map rendering. */
static void fillpolygon(const struct sector* sect, int color)
{
    // Map-to-screen transform constants; must match DrawMap's.
#ifdef SplitScreen
    float square = min(W/20.f/0.8, H/29.f), X = (W2-W)/20.f/*square*0.8*/, Y = square, X0 = W+X*1.f/*(W-18*square*0.8)/2*/, Y0 = (H-28*square)/2;
#else
    float square = min(W/20.f/0.8, H/29.f), X = square*0.8, Y = square, X0 = (W-18*square*0.8)/2, Y0 = (H-28*square)/2;
#endif
    const struct xy* const vert = sect->vertex;
    // Find the minimum and maximum Y coordinates
    // (map x becomes screen y via the 28-x flip).
    float miny = 9e9, maxy = -9e9;
    for(unsigned a = 0; a < sect->npoints; ++a)
    {
        miny = min(miny, 28-vert[a].x);
        maxy = max(maxy, 28-vert[a].x);
    }
    miny = Y0 + miny*Y; maxy = Y0 + maxy*Y;
    // Scan each line within this range
    for(int y = max(0, (int)(miny+0.5)); y <= min(H-1, (int)(maxy+0.5)); ++y)
    {
        // Find all intersection points on this scanline.
        // NOTE(review): the array holds W2 entries but the loop caps
        // num_intersections at W — confirm which bound was intended.
        float intersections[W2];
        unsigned num_intersections = 0;
        for(unsigned a = 0; a < sect->npoints && num_intersections < W; ++a)
        {
            float x0 = X0+vert[a].y*X, x1 = X0+vert[a+1].y*X;
            float y0 = Y0+(28-vert[a].x)*Y, y1 = Y0+(28-vert[a+1].x)*Y;
            if(IntersectBox(x0,y0,x1,y1, 0,y,W2-1,y))
            {
                struct xy point = Intersect(x0,y0,x1,y1, 0,y,W2-1,y);
                if(isnan(point.x) || isnan(point.y)) continue;
                // Insert it in intersections[] keeping it sorted.
                // Sorting complexity: n log n
                // NOTE(review): `begin = middle++` assigns middle and then
                // discards the increment, so begin stays at middle (a
                // conventional binary search would use middle+1) — the
                // insertion point may land one slot early; confirm intent.
                unsigned begin = 0, end = num_intersections, len = end-begin;
                while(len)
                {
                    unsigned middle = begin + len/2;
                    if(intersections[middle] < point.x)
                        { begin = middle++; len = len - len/2 - 1; }
                    else
                        len /= 2;
                }
                // Shift the tail up and insert.
                for(unsigned n = num_intersections++; n > begin; --n)
                    intersections[n] = intersections[n-1];
                intersections[begin] = point.x;
            }
        }
        // Draw lines between successive pairs of intersections
        // (even-odd rule: inside spans).
        for(unsigned a = 0; a+1 < num_intersections; a += 2)
            line(clamp(intersections[a], 0,W2-1), y,
                 clamp(intersections[a+1],0,W2-1), y, color);
        //printf("line(%f,%d, %f,%d)\n", minx,y, maxx,y);
    }
}
static void DrawMap(void)
{
static unsigned process = ~0u; ++process;
// Render the 2D map on screen
SDL_LockSurface(surface);
#ifdef SplitScreen
for(unsigned y=0; y<H; ++y)
memset( (char*)surface->pixels + (y*W2 + W)*4, 0, (W2-W)*4);
#else
for(unsigned y=0; y<H; ++y)
memset( (char*)surface->pixels + (y*W2)*4, 0, (W)*4);
#endif
#ifdef SplitScreen
float square = min(W/20.f/0.8, H/29.f), X = (W2-W)/20.f/*square*0.8*/, Y = square, X0 = W+X*1.f/*(W-18*square*0.8)/2*/, Y0 = (H-28*square)/2;
#else
float square = min(W/20.f/0.8, H/29.f), X = square*0.8, Y = square, X0 = (W-18*square*0.8)/2, Y0 = (H-28*square)/2;
#endif
for(float x=0; x<=18; ++x) line(X0+x*X, Y0+0*Y, X0+ x*X, Y0+28*Y, 0x002200);
for(float y=0; y<=28; ++y) line(X0+0*X, Y0+y*Y, X0+18*X, Y0+ y*Y, 0x002200);
#ifdef VisibilityTracking
for(unsigned c=0; c<NumSectors; ++c) if(sectors[c].visible) fillpolygon(§ors[c], 0x220000);
#endif
fillpolygon(§ors[player.sector], 0x440000);
#ifdef VisibilityTracking
for(unsigned c=0; c<NumVisibleSectors; ++c)
{
/*struct xy vert[W*2];
struct sector temp_sector = {0,0,vert,0,0,0};
for(unsigned x=0; x<W; ++x) if(VisibleFloors[c][x]) vert[temp_sector.npoints++] = VisibleFloorBegins[c][x];
for(unsigned x=W; x-- > 0; ) if(VisibleFloors[c][x]) vert[temp_sector.npoints++] = VisibleFloorEnds[c][x];
fillpolygon(&temp_sector, 0x222200);
temp_sector.npoints = 0;
for(unsigned x=0; x<W; ++x) if(VisibleCeils[c][x]) vert[temp_sector.npoints++] = VisibleCeilBegins[c][x];
for(unsigned x=W; x-- > 0; ) if(VisibleCeils[c][x]) vert[temp_sector.npoints++] = VisibleCeilEnds[c][x];
fillpolygon(&temp_sector, 0x220022);*/
for(unsigned x=0; x<W; ++x)
{
if(VisibleFloors[c][x])
line(clamp(X0 + VisibleFloorBegins[c][x].y*X, 0,W2-1), clamp(Y0 + (28-VisibleFloorBegins[c][x].x)*Y, 0,H-1),
clamp(X0 + VisibleFloorEnds[c][x].y*X, 0,W2-1), clamp(Y0 + (28-VisibleFloorEnds[c][x].x)*Y, 0,H-1),
0x222200);
if(VisibleCeils[c][x])
line(clamp(X0 + VisibleCeilBegins[c][x].y*X, 0,W2-1), clamp(Y0 + (28-VisibleCeilBegins[c][x].x)*Y, 0,H-1),
clamp(X0 + VisibleCeilEnds[c][x].y*X, 0,W2-1), clamp(Y0 + (28-VisibleCeilEnds[c][x].x)*Y, 0,H-1),
0x28003A);
}
}
/*for(unsigned n=0; n<NumVisibleFloors; ++n)
{
printf("%g,%g - %g,%g\n", VisibleFloorBegins[n].x, VisibleFloorBegins[n].y,
VisibleFloorEnds[n].x, VisibleFloorEnds[n].y );
line( X0+VisibleFloorBegins[n].x*X, Y0+VisibleFloorBegins[n].y*Y,
X0+VisibleFloorEnds[n].x*X, Y0+VisibleFloorEnds[n].y*Y,
n*0x010101
//0x550055
);
}*/
#endif
for(unsigned c=0; c<NumSectors; ++c)
{
unsigned a = c;
if(a == player.sector && player.sector != (NumSectors-1))
a = NumSectors-1;
else if(a == NumSectors-1)
a = player.sector;
const struct sector* const sect = §ors[a];
const struct xy* const vert = sect->vertex;
for(unsigned b = 0; b < sect->npoints; ++b)
{
float x0 = 28-vert[b].x, x1 = 28-vert[b+1].x;
unsigned vertcolor = a==player.sector ? 0x55FF55
#ifdef VisibilityTracing
: sect->visible ? 0x55FF55
#endif
: 0x00AA00;
line( X0+vert[b].y*X, Y0+x0*Y,
X0+vert[b+1].y*X, Y0+x1*Y,
(a == player.sector)
? (sect->neighbors[b] >= 0 ? 0xFF5533 : 0xFFFFFF)
#ifdef VisibilityTracing
: (sect->visible)
? (sect->neighbors[b] >= 0 ? 0xFF3333 : 0xAAAAAA)
#endif
: (sect->neighbors[b] >= 0 ? 0x880000 : 0x6A6A6A)
);
line( X0+vert[b].y*X-2, Y0+x0*Y-2, X0+vert[b].y*X+2, Y0+x0*Y-2, vertcolor);
line( X0+vert[b].y*X-2, Y0+x0*Y-2, X0+vert[b].y*X-2, Y0+x0*Y+2, vertcolor);
line( X0+vert[b].y*X+2, Y0+x0*Y-2, X0+vert[b].y*X+2, Y0+x0*Y+2, vertcolor);
line( X0+vert[b].y*X-2, Y0+x0*Y+2, X0+vert[b].y*X+2, Y0+x0*Y+2, vertcolor);
}
}
float c = player.anglesin, s = -player.anglecos;
float px = player.where.y, tx = px+c*0.8f, qx0 = px+s*0.2f, qx1=px-s*0.2f;
float py = 28-player.where.x, ty = py+s*0.8f, qy0 = py-c*0.2f, qy1=py+c*0.2f;
px = clamp(px,-.4f,18.4f); tx = clamp(tx,-.4f,18.4f); qx0 = clamp(qx0,-.4f,18.4f); qx1 = clamp(qx1,-.4f,18.4f);
py = clamp(py,-.4f,28.4f); ty = clamp(ty,-.4f,28.4f); qy0 = clamp(qy0,-.4f,28.4f); qy1 = clamp(qy1,-.4f,28.4f);
line(X0 + px*X, Y0 + py*Y, X0 + tx*X, Y0 + ty*Y, 0x5555FF);
line(X0 +qx0*X, Y0 +qy0*Y, X0 +qx1*X, Y0 +qy1*Y, 0x5555FF);
BloomPostprocess();
SDL_UnlockSurface(surface);
SaveFrame1();
//static unsigned skip=0;
//if(++skip >= 1) { skip=0; SDL_Flip(surface); }
}
/* Comparator for ordering map vertices by Y coordinate, then X coordinate.
 * Used with qsort() in the (disabled) map-export code in VerifyMap().
 *
 * The previous version returned (a->y - b->y) * 1e3 truncated to int, which
 * (a) is undefined behavior when the scaled difference does not fit in an
 * int (out-of-range float-to-int conversion), and (b) reported coordinates
 * differing by less than 1e-3 as "equal" while still returning early instead
 * of falling through to the X comparison — breaking the consistent ordering
 * qsort() requires. Explicit three-way comparisons avoid both problems and
 * can never overflow.
 */
static int vert_compare(const struct xy* a, const struct xy* b)
{
    if(a->y != b->y) return (a->y < b->y) ? -1 : 1;
    if(a->x != b->x) return (a->x < b->x) ? -1 : 1;
    return 0;
}
/* VerifyMap: validate the loaded sector map and repair it in place.
 * Three passes, restarted from scratch (goto Rescan) whenever the map is
 * mutated:
 *   1. Each sector's vertex chain must close into a loop
 *      (vertex[npoints] == vertex[0]).
 *   2. Portal links must be symmetric: if sector A lists B behind an edge,
 *      B must list A behind the shared (reversed) edge. Mismatches are
 *      patched.
 *   3. Every sector must be convex; a concave corner is repaired by
 *      splitting the sector in two along a newly inserted edge, appending
 *      the second half to the global sectors[] array.
 * NOTE(review): neighbor indices are stored in signed char arrays below,
 * so a map exceeding 127 sectors would overflow them — confirm map limits.
 */
static void VerifyMap(void)
{
    //int phase=0;
Rescan:
    //DrawMap(); SDL_Delay(100);
    // Verify that the chain of vertexes forms a loop.
    for(unsigned a=0; a<NumSectors; ++a)
    {
        const struct sector* const sect = &sectors[a];
        const struct xy* const vert = sect->vertex;
        if(vert[0].x != vert[ sect->npoints ].x
        || vert[0].y != vert[ sect->npoints ].y)
        {
            fprintf(stderr, "Internal error: Sector %u: Vertexes don't form a loop!\n", a);
        }
    }
    // Verify that for each edge that has a neighbor, the neighbor
    // has this same neighbor.
    for(unsigned a=0; a<NumSectors; ++a)
    {
        const struct sector* sect = &sectors[a];
        const struct xy* const vert = sect->vertex;
        for(unsigned b = 0; b < sect->npoints; ++b)
        {
            if(sect->neighbors[b] >= (int)NumSectors)
            {
                fprintf(stderr, "Sector %u: Contains neighbor %d (too large, number of sectors is %u)\n",
                    a, sect->neighbors[b], NumSectors);
            }
            struct xy point1 = vert[b], point2 = vert[b+1];
            int found = 0;
            for(unsigned d = 0; d < NumSectors; ++d)
            {
                const struct sector* const neigh = &sectors[d];
                for(unsigned c = 0; c < neigh->npoints; ++c)
                {
                    // A shared edge appears in the neighboring sector with its
                    // endpoints in reverse order (all sectors are clockwise).
                    if(neigh->vertex[c+1].x == point1.x
                    && neigh->vertex[c+1].y == point1.y
                    && neigh->vertex[c+0].x == point2.x
                    && neigh->vertex[c+0].y == point2.y)
                    {
                        if(neigh->neighbors[c] != (int)a)
                        {
                            fprintf(stderr, "Sector %d: Neighbor behind line (%g,%g)-(%g,%g) should be %u, %d found instead. Fixing.\n",
                                d, point2.x,point2.y, point1.x,point1.y, a, neigh->neighbors[c]);
                            neigh->neighbors[c] = a;
                            goto Rescan; // topology changed: re-verify everything
                        }
                        if(sect->neighbors[b] != (int)d)
                        {
                            fprintf(stderr, "Sector %u: Neighbor behind line (%g,%g)-(%g,%g) should be %u, %d found instead. Fixing.\n",
                                a, point1.x,point1.y, point2.x,point2.y, d, sect->neighbors[b]);
                            sect->neighbors[b] = d;
                            goto Rescan;
                        }
                        else
                            ++found;
                    }
                }
            }
            if(sect->neighbors[b] >= 0 && sect->neighbors[b] < (int)NumSectors && found != 1)
                fprintf(stderr, "Sectors %u and its neighbor %d don't share line (%g,%g)-(%g,%g)\n",
                    a, sect->neighbors[b],
                    point1.x,point1.y, point2.x,point2.y);
        }
    }
    // Verify that the vertexes form a convex hull.
    for(unsigned a=0; a<NumSectors; ++a)
    {
        struct sector* sect = &sectors[a];
        const struct xy* const vert = sect->vertex;
        for(unsigned b = 0; b < sect->npoints; ++b)
        {
            unsigned c = (b+1) % sect->npoints, d = (b+2) % sect->npoints;
            float x0 = vert[b].x, y0 = vert[b].y;
            float x1 = vert[c].x, y1 = vert[c].y;
            switch(PointSide(vert[d].x,vert[d].y, x0,y0, x1,y1))
            {
                case 0:
                    continue;
                    // Note: This used to be a problem for my engine, but is not anymore, so it is disabled.
                    // If you enable this change, you will not need the IntersectBox calls in some locations anymore.
                    if(sect->neighbors[b] == sect->neighbors[c]) continue;
                    fprintf(stderr, "Sector %u: Edges %u-%u and %u-%u are parallel, but have different neighbors. This would pose problems for collision detection.\n",
                        a, b,c, c,d);
                    break;
                case -1:
                    fprintf(stderr, "Sector %u: Edges %u-%u and %u-%u create a concave turn. This would be rendered wrong.\n",
                        a, b,c, c,d);
                    break;
                default:
                    // This edge is fine.
                    continue;
            }
            // A concavity was found: split the sector into two pieces.
            fprintf(stderr, "- Splitting sector, using (%g,%g) as anchor", vert[c].x,vert[c].y);
            // Insert an edge between (c) and (e),
            // where e is the nearest point to (c), under the following rules:
            // e cannot be c, c-1 or c+1
            // line (c)-(e) cannot intersect with any edge in this sector
            float nearest_dist = 1e29f;
            unsigned nearest_point = ~0u;
            for(unsigned n = (d+1) % sect->npoints; n != b; n = (n+1)% sect->npoints) // Don't go through b,c,d
            {
                float x2 = vert[n].x, y2 = vert[n].y;
                float distx = x2-x1, disty = y2-y1;
                float dist = distx*distx + disty*disty;
                if(dist >= nearest_dist) continue;
                if(PointSide(x2,y2, x0,y0, x1,y1) != 1) continue;
                int ok = 1;
                // Shrink the candidate edge slightly so merely touching an
                // existing vertex does not count as an intersection.
                x1 += distx*1e-4f; x2 -= distx*1e-4f;
                y1 += disty*1e-4f; y2 -= disty*1e-4f;
                for(unsigned f = 0; f < sect->npoints; ++f)
                    if(IntersectLineSegments(x1,y1, x2,y2,
                        vert[f].x,vert[f].y, vert[f+1].x, vert[f+1].y))
                        { ok = 0; break; }
                if(!ok) continue;
                // Check whether this split would resolve the original problem
                if(PointSide(x2,y2, vert[d].x,vert[d].y, x1,y1) == 1) dist += 1e6f;
                if(dist >= nearest_dist) continue;
                nearest_dist = dist;
                nearest_point = n;
            }
            if(nearest_point == ~0u)
            {
                fprintf(stderr, " - ERROR: Could not find a vertex to pair with!\n");
                SDL_Delay(200);
                continue;
            }
            unsigned e = nearest_point;
            fprintf(stderr, " and point %u - (%g,%g) as the far point.\n", e, vert[e].x,vert[e].y);
            // Now that we have a chain: a b c d e f g h
            // And we're supposed to split it at "c" and "e", the outcome should be two chains:
            // c d e (c)
            // e f g h a b c (e)
            struct xy* vert1 = malloc(sect->npoints * sizeof(*vert1));
            struct xy* vert2 = malloc(sect->npoints * sizeof(*vert2));
            signed char* neigh1 = malloc(sect->npoints * sizeof(*neigh1));
            signed char* neigh2 = malloc(sect->npoints * sizeof(*neigh2));
            // Create chain 1: from c to e.
            unsigned chain1_length = 0;
            for(unsigned n = 0; n < sect->npoints; ++n)
            {
                unsigned m = (c + n) % sect->npoints;
                neigh1[chain1_length] = sect->neighbors[m];
                vert1[chain1_length++] = sect->vertex[m];
                if(m == e) { vert1[chain1_length] = vert1[0]; break; }
            }
            // The closing edge of chain 1 borders the new sector created below.
            neigh1[chain1_length-1] = NumSectors;
            // Create chain 2: from e to c.
            unsigned chain2_length = 0;
            for(unsigned n = 0; n < sect->npoints; ++n)
            {
                unsigned m = (e + n) % sect->npoints;
                neigh2[chain2_length] = sect->neighbors[m];
                vert2[chain2_length++] = sect->vertex[m];
                if(m == c) { vert2[chain2_length] = vert2[0]; break; }
            }
            // The closing edge of chain 2 borders the original sector (a).
            neigh2[chain2_length-1] = a;
            // Change sect into using chain1.
            free(sect->vertex); sect->vertex = vert1;
            free(sect->neighbors); sect->neighbors = neigh1;
            sect->npoints = chain1_length;
            // Create another sector that uses chain2.
            sectors = realloc(sectors, ++NumSectors * sizeof(*sectors));
            sect = &sectors[a]; // realloc may have moved the array; re-fetch
            sectors[NumSectors-1] = (struct sector) { sect->floor, sect->ceil, vert2, chain2_length, neigh2 };
            // The other sector may now have neighbors that think
            // their neighbor is still the old sector. Rescan to fix it.
            goto Rescan;
        }
    }
    //printf("PHASE 2\n"); SDL_Delay(500);
    //if(phase == 0) { phase = 1; goto Rescan; }
    printf("%d sectors.\n", NumSectors);
#if 0
    /* This code creates the simplified map file for the program featured in the YouTube video. */
    FILE *fp = fopen("map-clear.txt", "wt");
    unsigned NumVertexes = 0;
    struct xy vert[256];
    int done[256];
    for(unsigned n=0; n<NumSectors; ++n)
        for(unsigned s=0; s<sectors[n].npoints; ++s)
        {
            struct xy point = sectors[n].vertex[(s+1)%sectors[n].npoints];
            unsigned v=0;
            for(; v<NumVertexes; ++v)
                if(point.x == vert[v].x && point.y == vert[v].y)
                    break;
            if(v == NumVertexes)
                { done[NumVertexes] = -1; vert[NumVertexes++] = point; }
        }
    // Sort the vertexes by Y coordinate, X coordinate
    qsort(vert, NumVertexes, sizeof(*vert), vert_compare);
    for(unsigned m=0,v=0; v<NumVertexes; ++v)
    {
        if(done[v] >= 0) continue;
        fprintf(fp, "vertex\t%g\t%g", vert[v].y, vert[v].x); done[v] = m++;
        for(unsigned v2=v+1; v2<NumVertexes; ++v2)
            if(done[v2] < 0 && vert[v2].y == vert[v].y)
                { fprintf(fp, " %g", vert[v2].x); done[v2] = m++; }
        fprintf(fp, "\n");
    }
    fprintf(fp, "\n");
    for(unsigned n=0; n<NumSectors; ++n)
    {
        fprintf(fp, "sector\t%g %g\t", sectors[n].floor, sectors[n].ceil);
        int wid = 0;
        for(unsigned s=0; s<sectors[n].npoints; ++s)
        {
            struct xy point = sectors[n].vertex[(s+1)%sectors[n].npoints];
            unsigned v=0;
            for(; v<NumVertexes; ++v)
                if(point.x == vert[v].x && point.y == vert[v].y)
                    break;
            wid += fprintf(fp, " %u", done[v]);
        }
        fprintf(fp, "%*s", 24-wid, "");
        for(unsigned s=0; s<sectors[n].npoints; ++s)
            fprintf(fp, "%d ", sectors[n].neighbors[s]);
        fprintf(fp, "\n");
    }
    fprintf(fp, "\nplayer\t%g %g\t%g\t%d\n", player.where.x, player.where.z, player.angle, player.sector);
    fclose(fp);
#endif
}
#ifndef TextureMapping
/* vline: Paint a vertical run of pixels at column x, from y1 down to y2
 * (both clamped to the screen). The first and last pixels receive their
 * own colors (top/bottom) and everything between is filled with middle.
 * A single-pixel span uses only the middle color; an inverted span
 * (y2 < y1 after clamping) draws nothing.
 */
static void vline(int x, int y1,int y2, int top,int middle,int bottom)
{
    int* fb = (int*) surface->pixels;
    int lo = clamp(y1, 0, H-1);
    int hi = clamp(y2, 0, H-1);
    if(lo > hi) return;                        /* nothing to draw */
    if(lo == hi) { fb[lo*W2+x] = middle; return; }
    fb[lo*W2+x] = top;
    for(int y = lo+1; y < hi; ++y)
        fb[y*W2+x] = middle;
    fb[hi*W2+x] = bottom;
}
#endif
/* Moves the player by (dx,dy) in the map, and also updates
* their anglesin/anglecos/sector properties properly.
*/
static void MovePlayer(float dx, float dy)
{
float px = player.where.x, py = player.where.y;
/* Check if this movement crosses one of this sector's edges
* that have a neighboring sector on the other side.
* Because the edge vertices of each sector are defined in
* clockwise order, PointSide will always return -1 for a point
* that is outside the sector and 0 or 1 for a point that is inside.
*/
const struct sector* const sect = §ors[player.sector];
for(int s = 0; s < sect->npoints; ++s)
if(sect->neighbors[s] >= 0
&& IntersectBox(px,py, px+dx,py+dy,
sect->vertex[s+0].x, sect->vertex[s+0].y,
sect->vertex[s+1].x, sect->vertex[s+1].y)
&& PointSide(px+dx, py+dy,
sect->vertex[s+0].x, sect->vertex[s+0].y,
sect->vertex[s+1].x, sect->vertex[s+1].y) < 0)
{
player.sector = sect->neighbors[s];
printf("Player is now in sector %d\n", player.sector);
break;
}
player.where.x += dx;
player.where.y += dy;
player.anglesin = sinf(player.angle);
player.anglecos = cosf(player.angle);
}
#ifdef TextureMapping
/* vline2: Draw a texture-mapped vertical line at column x from y1 to y2
 * (clamped to the screen). txtx selects the texture column; the texture
 * row for each pixel is produced by stepping the Scaler `ty`. With
 * LightMapping enabled, the texel is modulated by the lightmap sample
 * at the same coordinates.
 */
static void vline2(int x, int y1,int y2, struct Scaler ty,unsigned txtx, const struct TextureSet* t)
{
    int* const fb = (int*) surface->pixels;
    const int lo = clamp(y1, 0, H-1);
    const int hi = clamp(y2, 0, H-1);
    const unsigned u = txtx % 1024;      /* texture column is constant for the line */
    for(int y = lo; y <= hi; ++y)
    {
        const unsigned v = Scaler_Next(&ty) % 1024;
#ifdef LightMapping
        fb[y*W2 + x] = ApplyLight( t->texture[u][v], t->lightmap[u][v] );
#else
        fb[y*W2 + x] = t->texture[u][v];
#endif
    }
}
#endif
/* DrawScreen: render one full frame of the 3D view into `surface`.
 * Portal rendering: sectors are processed through a circular queue of
 * screen-space windows. The player's sector is first queued across the
 * whole screen width; each portal edge then schedules its neighboring
 * sector within the horizontal span the portal occupies on screen.
 * ytop[]/ybottom[] track, per screen column, the vertical window still
 * open for drawing; they shrink as walls are rendered.
 */
static void DrawScreen(void)
{
    struct item { short sectorno,sx1,sx2; } queue[MaxQueue], *head=queue, *tail=queue;
    short ytop[W]={0}, ybottom[W], renderedsectors[NumSectors];
    for(unsigned x=0; x<W; ++x) ybottom[x] = H-1;
    for(unsigned n=0; n<NumSectors; ++n) renderedsectors[n] = 0;
#ifdef VisibilityTracking
    for(unsigned n=0; n<NumSectors; ++n) sectors[n].visible=0;
#endif
#ifdef VisibilityTracking
    memset(VisibleFloors, 0, sizeof(VisibleFloors));
    memset(VisibleCeils, 0, sizeof(VisibleCeils));
    NumVisibleSectors=0;
#endif
    /* Begin whole-screen rendering from where the player is. */
    *head = (struct item) { player.sector, 0, W-1 };
    if(++head == queue+MaxQueue) head = queue;
    SDL_LockSurface(surface);
    while(head != tail)
    {
        /* Pick a sector & slice from queue to draw */
        const struct item now = *tail;
        if(++tail == queue+MaxQueue) tail = queue;
        if(renderedsectors[now.sectorno] & 0x21) continue; // Odd = still rendering, 0x20 = give up
        ++renderedsectors[now.sectorno];
#ifdef VisibilityTracking
        sectors[now.sectorno].visible=1;
#endif
        /* Render each wall of this sector that is facing towards player. */
        const struct sector* const sect = &sectors[now.sectorno];
#ifdef LightMapping
        struct xy bounding_min = {1e9f, 1e9f}, bounding_max = {-1e9f, -1e9f};
        GetSectorBoundingBox(now.sectorno, &bounding_min, &bounding_max);
#endif
        /* This loop can be used to illustrate currently rendering window. Should be disabled otherwise. */
        //for(unsigned x=now.sx1; x<=now.sx2; ++x)
        //    vline(x, ytop[x], ybottom[x], 0x003300, 0x00AA00, 0x003300);
        for(int s = 0; s < sect->npoints; ++s)
        {
            /* Acquire the x,y coordinates of the two vertexes forming the edge of the sector */
            /* Transform the vertices into the player's view */
            float vx1 = sect->vertex[s+0].x - player.where.x, vy1 = sect->vertex[s+0].y - player.where.y;
            float vx2 = sect->vertex[s+1].x - player.where.x, vy2 = sect->vertex[s+1].y - player.where.y;
            /* Rotate them around the player's view */
            float pcos = player.anglecos, psin = player.anglesin;
            float tx1 = vx1 * psin - vy1 * pcos, tz1 = vx1 * pcos + vy1 * psin;
            float tx2 = vx2 * psin - vy2 * pcos, tz2 = vx2 * pcos + vy2 * psin;
            /* Is the wall at least partially in front of the player? */
            if(tz1 <= 0 && tz2 <= 0) continue;
            /* If it's partially behind the player, clip it against player's view frustum */
#ifdef TextureMapping
            int u0 = 0, u1 = 1023;
#endif
            if(tz1 <= 0 || tz2 <= 0)
            {
                float nearz = 1e-4f, farz = 5, nearside = 1e-5f, farside = 20.f;
                // Find an intersection between the wall and the approximate edges of player's view
                struct xy i1 = Intersect(tx1,tz1,tx2,tz2, -nearside,nearz, -farside,farz);
                struct xy i2 = Intersect(tx1,tz1,tx2,tz2, nearside,nearz, farside,farz);
#ifdef TextureMapping
                struct xy org1 = {tx1,tz1}, org2 = {tx2,tz2};
#endif
                if(tz1 < nearz) { if(i1.y > 0) { tx1 = i1.x; tz1 = i1.y; } else { tx1 = i2.x; tz1 = i2.y; } }
                if(tz2 < nearz) { if(i1.y > 0) { tx2 = i1.x; tz2 = i1.y; } else { tx2 = i2.x; tz2 = i2.y; } }
#ifdef TextureMapping
                // Recompute the texture u-range of the clipped wall portion,
                // interpolating along whichever axis changes the most.
                // NOTE(review): abs() takes an int, so these float arguments
                // are truncated before comparison - fabsf() looks intended.
                if(abs(tx2-tx1) > abs(tz2-tz1))
                    u0 = (tx1-org1.x) * 1023 / (org2.x-org1.x), u1 = (tx2-org1.x) * 1023 / (org2.x-org1.x);
                else
                    u0 = (tz1-org1.y) * 1023 / (org2.y-org1.y), u1 = (tz2-org1.y) * 1023 / (org2.y-org1.y);
#endif
            }
            // if(tz1 <= 0.07f) { tx1 = (0.07f-tz1) * (tx2-tx1) / (tz2-tz1) + tx1; tz1 = 0.07f; }
            // if(tz2 <= 0.07f) { tx2 = (0.07f-tz2) * (tx1-tx2) / (tz1-tz2) + tx2; tz2 = 0.07f; }
            /* Do perspective transformation */
            float xscale1 = (W*hfov) / (tz1), yscale1 = (H*vfov) / (tz1); int x1 = W/2 + (int)(-tx1 * xscale1);
            float xscale2 = (W*hfov) / (tz2), yscale2 = (H*vfov) / (tz2); int x2 = W/2 + (int)(-tx2 * xscale2);
            /* Skip walls projecting with zero/negative width, and walls
             * entirely outside this render window. */
            if(x1 >= x2) continue;
            if(x2 < now.sx1 || x1 > now.sx2) continue;
            /* Acquire and transform the floor and ceiling heights */
            float yceil = sect->ceil - player.where.z;
            float yfloor = sect->floor - player.where.z;
#define Yaw(y,z) (y + z*player.yaw)
            int y1a = H/2 + (int)(-Yaw(yceil, tz1) * yscale1), y1b = H/2 + (int)(-Yaw(yfloor, tz1) * yscale1);
            int y2a = H/2 + (int)(-Yaw(yceil, tz2) * yscale2), y2b = H/2 + (int)(-Yaw(yfloor, tz2) * yscale2);
            /* Check the edge type. neighbor=-1 means wall, other=boundary between two sectors. */
            int neighbor = sect->neighbors[s];
            float nyceil=0, nyfloor=0;
            if(neighbor >= 0)
            {
                /* Something is showing through this wall (portal). */
                /* Perspective-transform the floor and ceiling coordinates of the neighboring sector. */
                nyceil = sectors[neighbor].ceil - player.where.z;
                nyfloor = sectors[neighbor].floor - player.where.z;
            }
            int ny1a = H/2 + (int)( -Yaw(nyceil, tz1) * yscale1), ny1b = H/2 + (int)( -Yaw(nyfloor, tz1) * yscale1);
            int ny2a = H/2 + (int)( -Yaw(nyceil, tz2) * yscale2), ny2b = H/2 + (int)( -Yaw(nyfloor, tz2) * yscale2);
            /* Render the wall. */
            int beginx = max(x1, now.sx1), endx = min(x2, now.sx2);
#if defined(DepthShading) && !defined(TextureMapping)
            struct Scaler z_int = Scaler_Init(x1,beginx,x2, tz1*8,tz2*8);
#endif
            /* Per-column interpolators for the wall's (and its neighbor's)
             * top and bottom screen edges across the wall's span. */
            struct Scaler ya_int = Scaler_Init(x1,beginx,x2, y1a,y2a);
            struct Scaler yb_int = Scaler_Init(x1,beginx,x2, y1b,y2b);
            struct Scaler nya_int = Scaler_Init(x1,beginx,x2, ny1a,ny2a);
            struct Scaler nyb_int = Scaler_Init(x1,beginx,x2, ny1b,ny2b);
            for(int x = beginx; x <= endx; ++x)
            {
                // Affine calculation of txtx would be:
                //   alpha : (x-x1) / (x2-x1)
                //   u0 : 0;
                //   u1 : 1023;
                //   txtx : u0 + u1 * alpha;
                // Perspective-corrected calculation of txtx (from Wikipedia):
                //   txtx : ((1-alpha) * (u0 / z0) + alpha * (u1 / z1))
                //        / (((1-alpha) / z0) + alpha/z1);
                // Unrolled for only one division with Maxima:
                //   txtx : u0*z1*(x2-x) + u1*z0*(x-x1)
                //        / ((x2-x)*z1 + (x-x1)*z0);
#ifdef TextureMapping
                int txtx = (u0*((x2-x)*tz2) + u1*((x-x1)*tz1)) / ((x2-x)*tz2 + (x-x1)*tz1);
#endif
#if defined(DepthShading) && !defined(TextureMapping)
                /* Calculate the Z coordinate for this point. (Only used for lighting.) */
                int z = Scaler_Next(&z_int);
#endif
                /* Acquire the Y coordinates for our floor & ceiling for this X coordinate */
                int ya = Scaler_Next(&ya_int);
                int yb = Scaler_Next(&yb_int);
                /* Clamp the ya & yb */
                int cya = clamp(ya, ytop[x],ybottom[x]);
                int cyb = clamp(yb, ytop[x],ybottom[x]);
                // Our perspective calculation produces these two:
                //   screenX = W/2 + -mapX * (W*hfov) / mapZ
                //   screenY = H/2 + -(mapY + mapZ*yaw) * (H*vfov) / mapZ
                // To translate these coordinates back into mapX, mapY and mapZ...
                //
                // Solving for Z, when we know Y (ceiling height):
                //   screenY - H/2 = -(mapY + mapZ*yaw) * (H*vfov) / mapZ
                //   (screenY - H/2) / (H*vfov) = -(mapY + mapZ*yaw) / mapZ
                //   (H/2 - screenY) / (H*vfov) = (mapY + mapZ*yaw) / mapZ
                //   mapZ = mapY / ((H/2 - screenY) / (H*vfov) - yaw)
                //   mapZ = mapY*H*vfov / (H/2 - screenY - yaw*H*vfov)
                // Solving for X, when we know Z
                //   mapX = mapZ*(W/2 - screenX) / (W*hfov)
                //
                // This calculation is used for visibility tracking
                // (the visibility cones in the map)
                // and for floor & ceiling texture mapping.
                //
#define CeilingFloorScreenCoordinatesToMapCoordinates(mapY, screenX,screenY, X,Z) \
    do { Z = (mapY)*H*vfov / ((H/2 - (screenY)) - player.yaw*H*vfov); \
         X = (Z) * (W/2 - (screenX)) / (W*hfov); \
         RelativeMapCoordinatesToAbsoluteOnes(X,Z); } while(0)
                //
#define RelativeMapCoordinatesToAbsoluteOnes(X,Z) \
    do { float rtx = (Z) * pcos + (X) * psin; \
         float rtz = (Z) * psin - (X) * pcos; \
         X = rtx + player.where.x; Z = rtz + player.where.y; \
    } while(0)
#ifdef TextureMapping
                // Texture-mapping for floors and ceilings is not very optimal in my program.
                // I'm converting each screen-pixel into map-coordinates by doing the perspective
                // transformation in reverse, and using these map-coordinates as indexes into texture.
                // This involves a few division calculations _per_ pixel, and would have been way
                // too slow for the platforms targeted by Doom and Duke3D.
                // In any case, there's no neat way to do it.
                // It is why the SNES port of Doom didn't do floor & ceiling textures at all.
                for(int y=ytop[x]; y<=ybottom[x]; ++y)
                {
                    /* Skip over the wall span; only ceiling/floor pixels here. */
                    if(y >= cya && y <= cyb) { y = cyb; continue; }
                    float hei = y < cya ? yceil : yfloor;
                    float mapx, mapz;
                    CeilingFloorScreenCoordinatesToMapCoordinates(hei, x,y, mapx,mapz);
                    unsigned txtx = (mapx * 256), txtz = (mapz * 256);
                    const struct TextureSet* txt = y < cya ? sect->ceiltexture : sect->floortexture;
#ifdef LightMapping
                    unsigned lu = ((unsigned)((mapx - bounding_min.x) * 1024 / (bounding_max.x - bounding_min.x))) % 1024;
                    unsigned lv = ((unsigned)((mapz - bounding_min.y) * 1024 / (bounding_max.y - bounding_min.y))) % 1024;
                    /* NOTE(review): index order is texture[txtx][txtz] here but
                     * texture[txtz][txtx] in the non-lightmapped branch below -
                     * confirm which orientation is intended. */
                    int pel = ApplyLight( txt->texture[txtx % 1024][txtz % 1024],
                                          txt->lightmap[lu][lv] );
#else
                    int pel = txt->texture[txtz % 1024][txtx % 1024];
#endif
                    ((int*)surface->pixels)[y*W2+x] = pel;
                }
#else
                /* Render ceiling: everything above this sector's ceiling height. */
                vline(x, ytop[x], cya-1, 0x111111 ,0x222222,0x111111);
                /* Render floor: everything below this sector's floor height. */
                vline(x, cyb+1, ybottom[x], 0x0000FF,0x0000AA,0x0000FF);
#endif
#ifdef VisibilityTracking
                // Keep track of what the player can see for a neat map gimmick.
                {unsigned n = NumVisibleSectors;
                if(ybottom[x] >= (cyb+1))
                {
                    float FloorXbegin,FloorZbegin,FloorXend,FloorZend;
                    CeilingFloorScreenCoordinatesToMapCoordinates(yfloor, x,cyb+1, FloorXbegin,FloorZbegin);
                    CeilingFloorScreenCoordinatesToMapCoordinates(yfloor, x,ybottom[x], FloorXend, FloorZend);
                    VisibleFloorBegins[n][x] = (struct xy){FloorXbegin,FloorZbegin};
                    VisibleFloorEnds[n][x] = (struct xy){FloorXend,FloorZend};
                    VisibleFloors[n][x] = 1;
                }
                if((cya-1) >= ytop[x])
                {
                    float CeilXbegin, CeilZbegin, CeilXend, CeilZend;
                    CeilingFloorScreenCoordinatesToMapCoordinates(yceil, x,ytop[x], CeilXbegin,CeilZbegin);
                    CeilingFloorScreenCoordinatesToMapCoordinates(yceil, x,cya-1, CeilXend, CeilZend);
                    VisibleCeilBegins[n][x] = (struct xy){CeilXbegin,CeilZbegin};
                    VisibleCeilEnds[n][x] = (struct xy){CeilXend,CeilZend};
                    VisibleCeils[n][x] = 1;
                }
                }
#endif
                /* Is there another sector behind this edge? */
                if(neighbor >= 0)
                {
                    /* Same for _their_ floor and ceiling */
                    int nya = Scaler_Next(&nya_int);
                    int nyb = Scaler_Next(&nyb_int);
                    /* Clamp ya2 & yb2 */
                    int cnya = clamp(nya, ytop[x],ybottom[x]);
                    int cnyb = clamp(nyb, ytop[x],ybottom[x]);
                    /* If our ceiling is higher than their ceiling, render upper wall */
#ifdef TextureMapping
                    vline2(x, cya, cnya-1, (struct Scaler)Scaler_Init(ya,cya,yb, 0,1023), txtx, &sect->uppertextures[s]);
#else
#ifdef DepthShading
                    unsigned r1 = 0x010101 * (255-z), r2 = 0x040007 * (31-z/8);
#else
                    unsigned r1 = 0xAAAAAA, r2 = 0x7C00D9;
#endif
                    vline(x, cya, cnya-1, 0, x==x1||x==x2 ? 0 : r1, 0);
#endif
                    /* Shrink the remaining window: only the portal opening
                     * stays drawable in this column. */
                    ytop[x] = clamp(max(cya, cnya), ytop[x], H-1);
                    // If our floor is lower than their floor, render bottom wall
#ifdef TextureMapping
                    vline2(x, cnyb+1, cyb, (struct Scaler)Scaler_Init(ya,cnyb+1,yb, 0,1023), txtx, &sect->lowertextures[s]);
#else
                    vline(x, cnyb+1, cyb, 0, x==x1||x==x2 ? 0 : r2, 0);
#endif
                    ybottom[x] = clamp(min(cyb, cnyb), 0, ybottom[x]);
                    /* These vlines can be used to illustrate the frame being rendered. */
                    /* They should be disabled otherwise. */
                    //vline(x, ytop[x],ybottom[x], 0x330000,0xAA0000,0x330000);
                    //vline(x, cya,cyb, 0x330000,0xAA0000,0x330000);
                }
                else
                {
                    /* There's no neighbor. Render wall. */
#ifdef TextureMapping
                    vline2(x, cya,cyb, (struct Scaler)Scaler_Init(ya,cya,yb, 0,1023), txtx, &sect->uppertextures[s]);
#else
#ifdef DepthShading
                    unsigned r = 0x010101 * (255-z);
#else
                    unsigned r = 0xAAAAAA;
#endif
                    vline(x, cya, cyb, 0, x==x1||x==x2 ? 0 : r, 0);
#endif
                }
                /* // uncomment this to see the process of a frame being rendered in real time.
                SDL_UnlockSurface(surface);
                SaveFrame2(surface); SDL_Flip(surface);
                SDL_LockSurface(surface);*/
            }
            /* Schedule the other sector for rendering within the window formed
             * by this wall (if the circular queue still has room). */
            if(neighbor >= 0 && endx >= beginx && (head+MaxQueue+1-tail)%MaxQueue)
            {
                *head = (struct item) { neighbor, beginx, endx };
                if(++head == queue+MaxQueue) head = queue;
                //if(tail-- == queue) tail = queue+MaxQueue-1;
                //*tail = (struct item) { neighbor, beginx, endx };
            }
        }
        /* Second increment: counter becomes even again = sector finished. */
        ++renderedsectors[now.sectorno];
#ifdef VisibilityTracking
        NumVisibleSectors += 1;
#endif
    }
    SDL_UnlockSurface(surface);
    SaveFrame2();
    //static unsigned skip=0;
    //if(/*++skip >= 10 && */!map) { skip=0; SDL_Flip(surface); }
}
/* Program entry point: load and verify the map, set up SDL video, then run
 * the interactive loop (render, vertical/horizontal physics, input) until
 * the player quits via 'q', window close, or SIGINT.
 */
int main(int argc, char** argv)
{
    LoadData();
    VerifyMap();
#ifdef TextureMapping
    int textures_initialized = LoadTexture();
#ifdef LightMapping
    if(textures_initialized || (argc > 1 && strcmp(argv[1], "--rebuild") == 0))
        BuildLightmaps();
#endif
#endif
    surface = SDL_SetVideoMode(W2, H, 32, 0);
    SDL_EnableKeyRepeat(150, 30);
    SDL_ShowCursor(SDL_DISABLE);
    signal(SIGINT, SIG_DFL); /* let Ctrl-C terminate the program normally */
    /* NOTE(review): fp is only referenced by the commented-out record/replay
     * lines near the bottom of the loop and is never fclose()d - dead
     * resource; confirm whether it should be removed. */
    FILE* fp = fopen("actions.log", "rb");
    int wsad[4]={0,0,0,0}, ground=0, falling=1, moving=0, ducking=0, map=0;
    SDL_Event ev;
    float yaw = 0;
    for(;;)
    {
        DrawScreen();
#ifdef SplitScreen
        DrawMap();
#else
        if(map) DrawMap();
#endif
        static unsigned skip=0;
        if(++skip >= 1) { skip=0; SDL_Flip(surface); }
        /* Vertical collision detection */
        float eyeheight = ducking ? DuckHeight : EyeHeight;
        ground = !falling;
        if(falling)
        {
            player.velocity.z -= 0.05f; /* Add gravity */
            float nextz = player.where.z + player.velocity.z;
            if(player.velocity.z < 0 && nextz < sectors[player.sector].floor + eyeheight)
            {
                /* Fix to ground */
                player.where.z = sectors[player.sector].floor + eyeheight;
                player.velocity.z = 0;
                falling = 0;
                ground = 1;
            }
            else if(player.velocity.z > 0 && nextz > sectors[player.sector].ceil)
            {
                /* Prevent jumping above ceiling */
                player.velocity.z = 0;
                falling = 1;
            }
            if(falling)
            {
                player.where.z += player.velocity.z;
                moving = 1;
            }
        }
        /* Horizontal collision detection */
        if(moving)
        {
            float px = player.where.x, py = player.where.y;
            float dx = player.velocity.x, dy = player.velocity.y;
            const struct sector* const sect = &sectors[player.sector];
            /* Check if the player is about to cross one of the sector's edges */
            for(int s = 0; s < sect->npoints; ++s)
                if(IntersectBox(px,py, px+dx,py+dy,
                    sect->vertex[s+0].x, sect->vertex[s+0].y,
                    sect->vertex[s+1].x, sect->vertex[s+1].y)
                && PointSide(px+dx, py+dy,
                    sect->vertex[s+0].x, sect->vertex[s+0].y,
                    sect->vertex[s+1].x, sect->vertex[s+1].y) < 0)
                {
                    /* The edge is a portal: find the vertical opening ("hole")
                     * shared by the two sectors. A plain wall has no hole. */
                    float hole_low = 9e9, hole_high = -9e9;
                    if(sect->neighbors[s] >= 0)
                    {
                        /* Check where the hole is. */
                        hole_low = max( sect->floor, sectors[sect->neighbors[s]].floor );
                        hole_high = min( sect->ceil, sectors[sect->neighbors[s]].ceil );
                    }
                    /* Check whether we're bumping into a wall. */
                    if(hole_high < player.where.z+HeadMargin
                    || hole_low > player.where.z-eyeheight+KneeHeight)
                    {
                        /* Bumps into a wall! Slide along the wall. */
                        /* This formula is from Wikipedia article "vector projection". */
                        float xd = sect->vertex[s+1].x - sect->vertex[s+0].x;
                        float yd = sect->vertex[s+1].y - sect->vertex[s+0].y;
                        player.velocity.x = xd * (dx*xd + dy*yd) / (xd*xd + yd*yd);
                        player.velocity.y = yd * (dx*xd + dy*yd) / (xd*xd + yd*yd);
                        moving = 0;
                    }
                }
            MovePlayer(player.velocity.x, player.velocity.y);
            falling = 1;
        }
        while(SDL_PollEvent(&ev))
        {
            switch(ev.type)
            {
                case SDL_KEYDOWN:
                case SDL_KEYUP:
                    switch(ev.key.keysym.sym)
                    {
                        case 'w': wsad[0] = ev.type==SDL_KEYDOWN; break;
                        case 's': wsad[1] = ev.type==SDL_KEYDOWN; break;
                        case 'a': wsad[2] = ev.type==SDL_KEYDOWN; break;
                        case 'd': wsad[3] = ev.type==SDL_KEYDOWN; break;
                        case 'q': goto done;
                        case ' ': /* jump */
                            if(ground) { player.velocity.z += 0.5; falling = 1; }
                            break;
                        case SDLK_LCTRL: /* duck */
                        case SDLK_RCTRL: ducking = ev.type==SDL_KEYDOWN; falling=1; break;
                        case SDLK_TAB: map = ev.type==SDL_KEYDOWN; break;
                        default: break;
                    }
                    break;
                case SDL_QUIT: goto done;
            }
        }
        /* mouse aiming */
        /**/{ int x,y;
        SDL_GetRelativeMouseState(&x,&y);
        player.angle += x * 0.03f;
        yaw = clamp(yaw - y*0.05f, -5, 5);
        player.yaw = yaw - player.velocity.z*0.5f;
        MovePlayer(0,0); }/**/
        /* Build the desired move vector from the currently held WSAD keys. */
        float move_vec[2] = {0.f, 0.f};
        if(wsad[0]) { move_vec[0] += player.anglecos*0.2f; move_vec[1] += player.anglesin*0.2f; }
        if(wsad[1]) { move_vec[0] -= player.anglecos*0.2f; move_vec[1] -= player.anglesin*0.2f; }
        if(wsad[2]) { move_vec[0] += player.anglesin*0.2f; move_vec[1] -= player.anglecos*0.2f; }
        if(wsad[3]) { move_vec[0] -= player.anglesin*0.2f; move_vec[1] += player.anglecos*0.2f; }
        int pushing = wsad[0] || wsad[1] || wsad[2] || wsad[3];
        /* Blend velocity toward the desired vector; respond faster while a
         * movement key is held than while coasting to a stop. */
        float acceleration = pushing ? 0.4 : 0.2;
        player.velocity.x = player.velocity.x * (1-acceleration) + move_vec[0] * acceleration;
        player.velocity.y = player.velocity.y * (1-acceleration) + move_vec[1] * acceleration;
        //fprintf(fp, "%g %g %d %d %d %d %d %g %g\n", player.velocity.x, player.velocity.y, pushing, ducking, falling, moving, ground, player.angle, yaw); fflush(fp);
        //fscanf(fp, "%g %g %d %d %d %d %d %g %g\n", &player.velocity.x, &player.velocity.y, &pushing,&ducking,&falling,&moving,&ground, &player.angle, &yaw); MovePlayer(0,0);
        if(pushing) moving = 1;
        SDL_Delay(10);
    }
done:
    UnloadData();
    SDL_Quit();
    return 0;
}
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/*
  Private definition of the ResizeFilter object: the filter kernel, its
  windowing function, their supports, and precomputed coefficients.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),  /* filter kernel function */
    (*window)(const double,const ResizeFilter *),  /* windowing function */
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */
  ResizeWeightingFunctionType
    filterWeightingType,
    windowWeightingType;
  size_t
    signature;      /* structure validity marker - presumably
                       MagickCoreSignature; confirm against resize.c setters */
};
/*
Forward declarations.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealType *FilterName(const double x,const double support)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5*cos(pi*x) + 0.08*cos(2*pi*x).
    Using cos(2*a) == 2*cos(a)^2-1 this reduces to one trig call and five
    flops (refactoring by Chantal Racette and Nicolas Robidoux):
      0.34 + cos(pi*x)*(0.5 + 0.16*cos(pi*x)).
  */
  double
    c;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  return(0.34+c*(0.5+c*0.16));
}
static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x)*cos(pi*x) + sin(pi*x)/pi.
    The support of Bohman is 1.0, so sin(pi*x) >= 0 over it and the sine
    can be recovered from the cosine with a single sqrt; this gives one
    trig call, one sqrt call, and 7 flops (refactoring by Nicolas
    Robidoux).
  */
  double
    c,
    s;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  s=sqrt(1.0-c*c);
  return((1.0-x)*c+(1.0/MagickPI)*s);
}
static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter weights every sample equally.  Do NOT clip the result to
    the support range: resize point sampling relies on requesting points
    beyond the nominal 0.0 support size.
  */
  return(1.0);
}
static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function: cos((pi/2)*x).
  */
  const double
    theta = (double) (MagickPI2*x);

  magick_unreferenced(resize_filter);
  return((double) cos(theta));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic filters defined by pre-computed B,C values:
      Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
      Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
      Spline              B = 1   C = 0    B-Spline Gaussian approximation
      Hermite             B = 0   C = 0    B-Spline interpolator
    See Mitchell and Netravali, "Reconstruction Filters in Computer
    Graphics", Computer Graphics, Volume 22, Number 4, August 1988.

    The coefficient array encodes the two cubic pieces:
      P0 = ( 6 - 2*B       )/6 = coeff[0]    (P1 is always 0)
      P2 = (-18 +12*B + 6*C)/6 = coeff[1]
      P3 = ( 12 - 9*B - 6*C)/6 = coeff[2]
      Q0 = (      8*B +24*C)/6 = coeff[3]
      Q1 = (    -12*B -48*C)/6 = coeff[4]
      Q2 = (      6*B +30*C)/6 = coeff[5]
      Q3 = (    - 1*B - 6*C)/6 = coeff[6]
    defining the filter:
      P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
      Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2
    which is continuous in both value and derivative (slope).
  */
  const double
    *coeff = resize_filter->coefficient;

  if (x < 1.0)
    return(coeff[0]+x*(x*(coeff[1]+x*coeff[2])));
  if (x < 2.0)
    return(coeff[3]+x*(coeff[4]+x*(coeff[5]+x*coeff[6])));
  return(0.0);
}
/*
  CubicSpline(): 2-, 3- or 4-lobe piecewise cubic spline filter.  The lobe
  count is selected through resize_filter->support (presumably set by the
  "filter:lobes" expert option before this is called -- the caller is not
  visible here).  Each lobe is a cubic polynomial in (x - lobe_start).
*/
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with sigma = 1/2 (or as user specified):
      1D:     exp(-(x^2)/(2.0*sigma^2)) / (sqrt(2*PI)*sigma^2)
      radial: exp(-(r^2)/(2.0*sigma^2)) / (PI*sigma^2)
    Only the normalization multiplier differs between the 1D and radial
    forms, and it is neither needed nor used when Gaussian acts as a
    filter.  The pre-calculated constants are:
      coeff[0] = sigma
      coeff[1] = 1.0/(2.0*sigma^2)        <- the only one actually used
      coeff[2] = 1.0/(sqrt(2*PI)*sigma^2) (informative only)
    Keeping 'sigma' separate from the 'blur/support' settings allows
    special 'small sigma' Gaussians without the filter 'missing' pixels
    because the support became too small.
  */
  const double
    gamma = resize_filter->coefficient[1];

  return(exp((double) (-gamma*x*x)));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann: cosine window function:
      0.5 + 0.5*cos(pi*x).
  */
  double
    c;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  return(0.5+0.5*c);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming: offset cosine window function:
      0.54 + 0.46*cos(pi*x).
  */
  double
    c;

  magick_unreferenced(resize_filter);
  c=cos((double) (MagickPI*x));
  return(0.54+0.46*c);
}
static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Jinc function: J1(pi*x)/x, with the limit value pi/2 at x == 0.
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions:
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
    The original "zoom" program by Paul Heckbert called this "Bessel",
    but it is more accurately named "Jinc".
  */
  if (x != 0.0)
    return(BesselOrderOne(MagickPI*x)/x);
  return(0.5*MagickPI);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser windowing function (Bessel windowing):
      I0(beta*sqrt(1-x^2)) / I0(0)
    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5), often
    stated in terms of alpha*PI.  The normalization factor (coeff[1]) is
    not strictly needed, but without it the filter has a large value at
    x=0, making comparison with other windowing functions difficult.
  */
  const double
    beta = resize_filter->coefficient[0],
    normalization = resize_filter->coefficient[1];

  return(normalization*I0(beta*sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  /*
    Lagrange piecewise polynomial fit of sinc.  The 'order' of the
    Lagrange function depends on the overall support window size of the
    filter: a support of 2 gives a lagrange-4 (piecewise cubic).  "n"
    identifies the piece of the piecewise polynomial being evaluated.
    See Survey: Interpolation Methods, IEEE Transactions on Medical
    Imaging, Vol 18, No 11, November 1999, p1049-1075 -- Equation 27 on
    p1064.
  */
  double
    value;

  ssize_t
    i,
    n,
    order;

  if (x > resize_filter->support)
    return(0.0);  /* outside the region of support */
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);  /* which piece */
  value=1.0;
  for (i=0; i < order; i++)
  {
    if (i == n)
      continue;
    value*=(n-i-x)/(n-i);
  }
  return(value);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x >= 1.5)
    return(0.0);
  if (x >= 0.5)
    {
      const double t = x-1.5;
      return(0.5*t*t);
    }
  return(0.75-x*x);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi*x)/(pi*x), with sinc(0) == 1.
  */
  if (x == 0.0)
    return((double) 1.0);
  {
    const double pi_x=(double) (MagickPI*x);
    return(sin((double) pi_x)/pi_x);
  }
}
/*
  SincFast(): fast, highly accurate polynomial/rational approximation of
  sinc(x) == sin(pi*x)/(pi*x) on [-4,4]; falls back to the exact trig
  formula outside that interval.
*/
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor
    polynomials / Pade approximants, the approximations are computed with a
    completely different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops).  More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error
    over the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction: http://www.boost.org/doc/libs/
    1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/backgrounders/
    remez.html

    If outside of the interval of approximation, use the standard trig
    formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The precision of the selected approximation scales with the build's
      quantum depth.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* (xx-k^2) factors pin the zero crossings at x = 1, 2, 3, 4. */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39; rational (quotient of
      polynomials) approximation.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline; bilinear interpolation; Tent 1D filter;
    or a Bartlett 2D cone filter.  Also used as a Bartlett windowing
    function for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter: 1-x^2 inside the unit support.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special a 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for images distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
%  RobidouxSharp is a slightly sharper version of Robidoux, some believe it
%  is too sharp.  It is designed to minimize the maximum possible change in
%  a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
%  conditions.  Amazingly Mitchell falls roughly between Robidoux and
%  RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
%     function is not advisable.  If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
%     causes the windowing (or self-windowing Lagrange filter) to act as if
%     the support window is much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these are given it is assumes to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
%    Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterType filter,const MagickBooleanType cylindrical,
ExceptionInfo *exception)
{
const char
*artifact;
FilterType
filter_type,
window_type;
double
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table Mapping given Filter, into Weighting and Windowing functions.
A 'Box' windowing function means its a simble non-windowed filter.
An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
"cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterType
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterType
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
{ CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */
};
/*
Table mapping the filter/window from the above table to an actual function.
The default support size for that filter as a weighting function, the range
to scale with to use that function as a sinc windowing function, (typ 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
double
(*function)(const double,const ResizeFilter*),
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
ResizeWeightingFunctionType weightingFunctionType;
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
{ Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
{ Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
{ Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */
{ CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
(void) exception;
resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
(void) memset(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur=1.0;
/* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
(filter != SincFastFilter))
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (IsStringTrue(artifact) != MagickFalse)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterType) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterType) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type= cylindrical != MagickFalse ? JincFilter
: SincFastFilter;
window_type=(FilterType) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
resize_filter->window=filters[window_type].function;
resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickCoreSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(double) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remain unchanged */
break;
default:
break;
}
/* Global Sharpening (regardless of orthoginal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* guassian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficents for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= 2*value; /* increase support linearly */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL)*MagickPI;
/* Define coefficents for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
/* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(double) lobes;
}
if (resize_filter->filter == Jinc)
{
/*
Convert a Jinc function lobes value to a real support value.
*/
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
/*
Blur this filter so support is a integer value (lobes dependant).
*/
if (filter_type == LanczosRadiusFilter)
resize_filter->blur*=floor(resize_filter->support)/
resize_filter->support;
}
/*
Expert blur override.
*/
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(double) MagickEpsilon;
/*
Expert override of the support setting.
*/
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale windowing function separately to the support 'clipping' window
that calling operator is planning to actually use. (Expert override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for weighting
function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
* Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
{
const double
twoB = B+B;
/*
Convert B,C values into Cubic Coefficents. See CubicBC().
*/
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
{
double
support,
x;
/*
Set the weighting function properly when the weighting function
may not exactly match the filter of the same name. EG: a Point
filter is really uses a Box weighting function with a different
support than is typically used.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,
"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(),(double)resize_filter->blur);
if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
GetMagickPrecision(),(double)
GetResizeFilterWeight(resize_filter,x));
/*
A final value so gnuplot can graph the 'stop' properly.
*/
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: resize via fast mesh interpolation.  Best suited to
    small (less than +/- 50%) size changes; use a full filtered resize for
    larger scaling factors.
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*J1(x), where J1() is the rational approximation below;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Modified Bessel function of the first kind, order zero: sum the power
    series 1 + y + y^2/(2!)^2 + y^3/(3!)^2 + ... with y = (x/2)^2 until the
    next term drops below MagickEpsilon.
  */
  double
    result,
    term,
    y;

  register ssize_t
    k;

  y=x*x/4.0;
  result=1.0;
  term=y;
  for (k=2; term > MagickEpsilon; k++)
  {
    result+=term;
    term*=y/((double) k*k);
  }
  return(result);
}
#undef J1
static double J1(double x)
{
  /*
    Rational approximation to J1(x)/x on (0,8]: two degree-8 polynomials in
    x*x evaluated by Horner's rule; the caller multiplies by x to recover
    the Bessel function of order one.
  */
  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  double
    denominator,
    numerator;

  register ssize_t
    k;

  numerator=Pone[8];
  denominator=Qone[8];
  k=7;
  while (k >= 0)
  {
    numerator=numerator*x*x+Pone[k];
    denominator=denominator*x*x+Qone[k];
    k--;
  }
  return(numerator/denominator);
}
#undef P1
static double P1(double x)
{
  /*
    Rational approximation for the asymptotic modulus term P1 used in the
    large-argument (x >= 8) expansion of the Bessel function of order one;
    the polynomials are evaluated in powers of (8/x)^2 by Horner's rule.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  double
    denominator,
    numerator;

  register ssize_t
    k;

  numerator=Pone[5];
  denominator=Qone[5];
  k=4;
  while (k >= 0)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
    k--;
  }
  return(numerator/denominator);
}
#undef Q1
static double Q1(double x)
{
  /*
    Rational approximation for the asymptotic phase term Q1 used in the
    large-argument (x >= 8) expansion of the Bessel function of order one;
    the polynomials are evaluated in powers of (8/x)^2 by Horner's rule.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  double
    denominator,
    numerator;

  register ssize_t
    k;

  numerator=Pone[5];
  denominator=Qone[5];
  k=4;
  while (k >= 0)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
    k--;
  }
  return(numerator/denominator);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind of order one.  For |x| < 8 use the
    rational approximation x*J1(x); otherwise use the asymptotic form
    sqrt(2/(pi*x))*(P1(x)*cos(x1)-8/x*Q1(x)*sin(x1)) with x1 = x-3*pi/4,
    expanding sin(x1) and cos(x1) in terms of sin(x) and cos(x).  The
    antisymmetry j1(-x) = -j1(x) restores the sign at the end.
  */
  double
    result,
    signed_x;

  if (x == 0.0)
    return(0.0);
  signed_x=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(signed_x*J1(x));
  result=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double)
    x)-cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (signed_x < 0.0)
    result=(-result);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Release a resize filter.  The signature is inverted before freeing so a
    stale pointer trips the signature asserts guarding every accessor.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
/* Return the filter's coefficient array (e.g. the cubic B,C expansion). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Return the filter's blur factor. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Return the scale applied to the windowing function's argument. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Return the support ('clipping' window size) of the windowing function. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Return the enum identifying the filter's weighting function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Return the enum identifying the window's weighting function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Return the practical support: the declared support scaled by blur. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    blurred_x,
    window_weight;

  /*
    Evaluate the filter at offset x: scale x by the blur factor, compute the
    windowing function's weight (1.0 for Box windows or a near-zero window
    support, avoiding a division by zero), and multiply it into the
    weighting function proper.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  blurred_x=fabs((double) x)/resize_filter->blur;
  window_weight=1.0;
  if ((resize_filter->window_support >= MagickEpsilon) &&
      (resize_filter->window != Box))
    window_weight=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image: map each destination pixel center back to
    a (generally fractional) source coordinate and interpolate its value
    with the requested pixel interpolation method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Same geometry requested: a clone is all that is needed. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /*
    Per-axis factors that map destination coordinates back into the source.
  */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /* The +0.5/-0.5 pair aligns destination and source pixel centers. */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      /* Skip pixels the write mask protects. */
      if (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(resize_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        /*
          NOTE(review): InterpolatePixelChannels() is invoked once per
          channel of this loop even though its name suggests it handles all
          channels of q in one call -- presumably redundant work; verify
          against its definition before changing.
        */
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image: content-aware rescaling via the Liquid Rescale
    (seam carving) delegate library.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Too small to carve seams from: fall back to a conventional resize.
  */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Stage the pixels as packed gfloat values, one [0,1]-normalized value per
    channel, in the layout liblqr expects.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /*
    NOTE(review): the init status is immediately overwritten by the resize
    status and then discarded -- errors from either call go unchecked.
  */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  /*
    The carver reports its own final geometry; clone the source at that
    size to receive the carved result.
  */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result pixel-by-pixel into the rescaled image,
    denormalizing each channel from [0,1] back to the quantum range.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  /*
    Stub used when ImageMagick is built without the Liquid Rescale (LQR)
    delegate: report a missing-delegate error and return no image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes: the result is exactly twice the
    source's width and height.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Each source row y produces two destination rows (2*y and 2*y+1). */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict r;

      register ssize_t
        i;

      size_t
        channels;

      /*
        Fetch the 3x3 neighborhood centered on (x,y); virtual pixels cover
        the image edges.  intensity[4] is the center; 1/3/5/7 are the
        top/left/right/bottom neighbors.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      channels=GetPixelChannels(image);
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i*channels);
      r=q;
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel: top/bottom or left/right neighbors match, so
            all four destination pixels take the center's value.
          */
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          /* jump r to the start of the second destination row */
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
        }
      else
        {
          /*
            Selectively clone pixel: each of the four destination pixels
            copies an adjacent edge neighbor when the two nearest neighbors
            match in intensity, otherwise the center pixel.
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          /* jump r to the start of the second destination row */
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
        }
      q+=2*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: scale the image to half its width and height with
    the Spline filter.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    density_x,
    density_y;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Compute the pixel geometry that keeps the image the same physical size
    at the new resolution; images without a stored density are assumed to
    be 72 DPI.  The result carries the requested resolution.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  density_x=image->resolution.x == 0.0 ? 72.0 : image->resolution.x;
  density_y=image->resolution.y == 0.0 ? 72.0 : image->resolution.y;
  width=(size_t) (x_resolution*image->columns/density_x+0.5);
  height=(size_t) (y_resolution*image->rows/density_y+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One entry of a resize kernel: the filter weight assigned to a single
  contributing source pixel, identified by its index along the row or
  column being resized.
*/
typedef struct _ContributionInfo
{
  double
    weight;  /* filter weight for this contribution */
  ssize_t
    pixel;  /* index of the contributing source pixel */
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  /*
    Release the per-thread contribution buffers and the pointer table
    itself.  NULL slots (left by a partially completed acquire) are
    skipped.  Always returns NULL.
  */
  register ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  /*
    Allocate one aligned ContributionInfo buffer of 'count' entries per
    worker thread.  The pointer table is zeroed first so that a failure
    part-way through can be unwound safely; on any failure the whole set is
    destroyed and NULL returned.
  */
  ContributionInfo
    **contribution;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
/*
  HorizontalFilter() resizes 'image' horizontally into 'resize_image' by
  convolving each destination column with 'resize_filter'.  x_factor is the
  ratio of destination to source columns; 'span' and 'offset' drive shared
  progress reporting across the horizontal and vertical passes.  Returns
  MagickTrue on success.
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"
  CacheView
    *image_view,
    *resize_view;
  ClassType
    storage_class;
  ContributionInfo
    **magick_restrict contributions;
  MagickBooleanType
    status;
  double
    scale,
    support;
  ssize_t
    x;
  /*
    Apply filter to resize horizontally from image to resize image.
  */
  /* When minifying, widen the filter window by the shrink factor. */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();
    double
      bisect,
      density;
    register const Quantum
      *magick_restrict p;
    register ContributionInfo
      *magick_restrict contribution;
    register Quantum
      *magick_restrict q;
    register ssize_t
      y;
    ssize_t
      n,
      start,
      stop;
    if (status == MagickFalse)
      continue;
    /* Center of the filter window in source coordinates. */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    /* Gather the filter weight of every source column in [start,stop). */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;
        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the full strip of source columns, write one destination column. */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;
        PixelChannel
          channel;
        PixelTrait
          resize_traits,
          traits;
        register ssize_t
          j;
        ssize_t
          k;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* Copy-through channel or masked pixel: nearest source pixel. */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each sample by its alpha, renormalize by the
          accumulated alpha (gamma) at the end.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() resizes 'image' vertically into 'resize_image' by
  convolving each destination row with 'resize_filter'.  y_factor is the
  ratio of destination to source rows; 'span' and 'offset' drive shared
  progress reporting across the two passes.  Returns MagickTrue on success.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;
  ClassType
    storage_class;
  ContributionInfo
    **magick_restrict contributions;
  double
    scale,
    support;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Apply filter to resize vertically from image to resize image.
  */
  /* When minifying, widen the filter window by the shrink factor. */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    double
      bisect,
      density;
    register const Quantum
      *magick_restrict p;
    register ContributionInfo
      *magick_restrict contribution;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    ssize_t
      n,
      start,
      stop;
    if (status == MagickFalse)
      continue;
    /* Center of the filter window in source coordinates. */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    /* Gather the filter weight of every source row in [start,stop). */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;
        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the full strip of source rows, write one destination row. */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;
        PixelChannel
          channel;
        PixelTrait
          resize_traits,
          traits;
        register ssize_t
          j;
        ssize_t
          k;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* Copy-through channel or masked pixel: nearest source pixel. */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* Alpha blending: alpha-weight samples, renormalize by gamma. */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_VerticalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  ResizeImage() scales 'image' to columns x rows using 'filter' (see the
  method comment above for the filter-defaulting rules) and returns a newly
  allocated image, or NULL on failure with details in 'exception'.  The
  resize is performed as two separable one-dimensional passes through an
  intermediate image.
*/
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;
  FilterType
    filter_type;
  Image
    *filter_image,
    *resize_image;
  MagickOffsetType
    offset;
  MagickSizeType
    span;
  MagickStatusType
    status;
  ResizeFilter
    *resize_filter;
  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* No-op resize with default filter: just clone. */
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    Intermediate image: resize along the axis with the larger scale change
    last, so the smaller intermediate is produced first.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));
    }
  /*
    Resize image.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales 'image' to columns x rows by point sampling: each
  destination pixel is copied from exactly one source pixel, so no new
  colors are introduced.  Returns a newly allocated image, or NULL on
  failure with details in 'exception'.

  Fix: the "sample:offset" artifact was parsed twice back-to-back with the
  first ParseGeometry() result discarded; the redundant call is removed.
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"
  CacheView
    *image_view,
    *sample_view;
  Image
    *sample_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  register ssize_t
    x1;
  ssize_t
    *x_offset,
    y;
  PointInfo
    sample_offset;
  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;
    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;
        MagickStatusType
          flags;
        /* Offsets are given as a percentage of the sample region. */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute, once, which source column feeds each destination column. */
  for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
    x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    ssize_t
      y_offset;
    if (status == MagickFalse)
      continue;
    /* Source row that feeds this destination row. */
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      register ssize_t
        i;
      /* Masked pixels are skipped, not sampled. */
      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          image_traits,
          traits;
        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SampleImage)
#endif
        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ScaleImage() scales 'image' to columns x rows using an area-averaging
  (box) scheme: source rows are accumulated into y_vector/scanline with
  fractional coverage weights, then each scanline is accumulated across
  columns into scale_scanline.  Returns a newly allocated image, or NULL on
  failure with details in 'exception'.
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
  CacheView
    *image_view,
    *scale_view;
  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector;
  Image
    *scale_image;
  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;
  PixelTrait
    scale_traits;
  PointInfo
    scale,
    span;
  register ssize_t
    i;
  ssize_t
    n,
    number_rows,
    y;
  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  /* When row counts match, scanline can alias x_vector (no y blending). */
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            /* Store premultiplied (alpha-associated) channel values. */
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction.
        */
        /* Accumulate whole source rows that fall inside this output row. */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        /* The next source row straddles the output-row boundary. */
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /* Emit the blended row and reset the y accumulator. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply using the accumulated alpha. */
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;
        /*
          Scale X direction.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          /* Source column entirely covers one or more output columns. */
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          /* Remainder of this source column feeds the next output column. */
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        if (span.x > 0)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) &&
            (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply using the accumulated alpha. */
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ThumbnailImage() scales 'image' to columns x rows, strips non-color
  profiles and comments, and attaches freedesktop.org Thumb::* properties,
  producing a small low-cost thumbnail suitable for the Web.  Returns a
  newly allocated image, or NULL on failure with details in 'exception'.

  Fix: 'attributes.st_mtime' was also formatted *outside* the
  GetPathAttributes() success branch, reading a possibly-uninitialized
  'struct stat' (and the result was immediately overwritten anyway); that
  dead, undefined-behavior read is removed along with the dead
  CopyMagickString() before it.
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5
  char
    *url,
    value[MagickPathExtent];
  const char
    *name;
  Image
    *thumbnail_image;
  double
    x_factor,
    y_factor;
  struct stat
    attributes;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  /*
    For modest shrinks resize directly; for drastic shrinks pre-sample to
    SampleFactor times the target size first, then resize, which is much
    cheaper and visually equivalent at thumbnail scale.
  */
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;
        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        /* Deletion invalidates the iterator; restart it. */
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Attach freedesktop.org thumbnail metadata.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  if ( GetPathAttributes(image->filename,&attributes) != MagickFalse )
    {
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  url=GetMagickHomeURL();
  (void) SetImageProperty(thumbnail_image,"software",url,exception);
  url=DestroyString(url);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
|
GB_binop__min_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_int32)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__min_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__min_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int32)
// A*D function (colscale): GB (_AxD__min_int32)
// D*A function (rowscale): GB (_DxB__min_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int32)
// C=scalar+B GB (_bind1st__min_int32)
// C=scalar+B' GB (_bind1st_tran__min_int32)
// C=A+scalar GB (_bind2nd__min_int32)
// C=A'+scalar GB (_bind2nd_tran__min_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IMIN (aij, bij)
// Operator-specific macro definitions consumed by the templates included
// below; together they specialize the generic kernels for MIN over int32_t.
// C, A and B entries are all int32_t for this operator.
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access the p-th entry of the C output array
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT32 || GxB_NO_MIN_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Type-specialized wrapper: the loop itself comes from the included template,
// driven by the GB_* macros defined above.  Note this variant returns void and
// has no GB_DISABLE guard, unlike the other wrappers in this file.
void GB (_Cdense_ewise3_accum__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when the operator is compiled out (GB_DISABLE), so the
// caller can fall back to the generic kernel; GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_ewise3_noaccum__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B has been pre-sliced into B_ntasks tasks (B_ek_slicing) for B_nthreads
// threads; the actual accumulation loop lives in the included template.
GrB_Info GB (_Cdense_accumB__min_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points at the scalar b, already typecast to int32_t by the caller.
// Returns GrB_NO_VALUE when the operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__min_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // an unreachable duplicate "return (GrB_SUCCESS)" emitted by the code
    // generator was removed here: the braced block above always returns
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C->x is written in place through the typed Cx alias; the scaling loop lives
// in the included meta file.  The *_is_pattern flags tell the template whether
// the corresponding values can be ignored (structure-only input).
GrB_Info GB (_AxD__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD above, scaling rows of B by the diagonal of D.
GrB_Info GB (_DxB__min_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// The C_to_M/C_to_A/C_to_B maps and TaskList describe the pre-computed
// symbolic phase; this function runs only the numeric phase (GB_PHASE_2_OF_2).
// The three GB_WERK workspaces are allocated/released inside the template
// (GB_FREE_WORK), so all paths out of the #else branch are balanced.
GrB_Info GB (_AaddB__min_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// General element-wise multiply (set intersection of patterns); the numeric
// loop lives in the included meta file.
GrB_Info GB (_AemultB_01__min_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for MIN (commutative), so only the non-flipped branch of
// the preprocessor conditional below is compiled for this operator.
GrB_Info GB (_AemultB_02__min_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// The mask M drives the iteration (it has been pre-sliced via M_ek_slicing);
// Cp_kfirst locates the first C entry of each task.
GrB_Info GB (_AemultB_03__min_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// ewise_method selects among the bitmap sub-methods inside the template.
GrB_Info GB (_AemultB_bitmap__min_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [k] = min (x, Bx [k]) for every entry present in the bitmap Bb
// (Bb == NULL means all anz entries are present).  No typecasting is done.
GrB_Info GB (_bind1st__min_int32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap participate
        if (GBB (Bb, k))
        {
            // cij = min (x, bij)
            Cx [k] = GB_IMIN (x, Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [k] = min (Ax [k], y) for every entry present in the bitmap Ab
// (Ab == NULL means all anz entries are present).  No typecasting is done.
GrB_Info GB (_bind2nd__min_int32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap participate
        if (GBB (Ab, k))
        {
            // cij = min (aij, y)
            Cx [k] = GB_IMIN (Ax [k], y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__min_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// Workspaces and A_slice describe the parallel transpose decomposition
// computed by the caller; the loop lives in GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cg_main.c | //MIT License
//
//Copyright (c) 2018 Sicong Zhuang
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files (the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include "cg_main.h"
#include "csparse.h"
// Global solver state, filled in by cg_config()/cg_setup() before main's loop.
void *Ahbh;                // blocked/hierarchical matrix form used by hbsblas_dcsrmv
void *Ahb;                 // Harwell-Boeing form of the system matrix
void *Acsr;                // CSR form of the system matrix (cast to hbmat_t* by the solvers)
int n;                     // problem dimension — presumably set by cg_config; confirm
int bm;                    // block size for the blocked BLAS kernels
int cgit;                  // maximum number of CG iterations
double prec;               // convergence threshold on the relative residual
int correction;            // period of the residual-replacement (accuracy improvement) step
int iter_fuse;             // iteration-fusion depth for the IFCG variants
//int is_precond;
int rep;                   // number of times main() repeats the chosen solver
char *aname;               // matrix file name — assumed; set in cg_config, verify
char *rhsfname;            // right-hand-side file name — assumed; set in cg_config, verify
double orth_fac;           // orthogonality-check factor (checks currently disabled in solvers)
int cglog_level;           // when nonzero, solvers dump residual/timing logs
int cg_ver;                // selects the CG variant in main's switch
double *rhs;               // right-hand side vector b
double *x;                 // solution vector
void *preconditioner;
css **S;                   // CSparse symbolic Cholesky factors, used by bsblas_dcholsolv2
csn **N;                   // CSparse numeric Cholesky factors, used by bsblas_dcholsolv2
cs *Acs;
css *SA;
csn *NA;
cs *AA;
// scalar constants passed by value to the blocked kernels
double fp_one = 1.0;
double fp_mone = -1.0;
double fp_nought = 0.0;
// Entry point: parse the configuration, set up the system (x, rhs, matrices),
// then run the selected CG variant `rep` times on a zeroed scratch pool.
// Returns 1 on configuration failure, 2 on setup failure, 0 otherwise.
int main(int argc, char *argv[])
{
if ( cg_config(argc, argv) ) {
return 1;
}
if ( cg_setup(n, bm, &x, &rhs) ) {
return 2;
}
hbmat_t *Atmp = Acsr;
int dim = Atmp->m;
// 20*dim doubles of scratch, registered with the OmpSs runtime for the
// *_BP ("buffer pool") solver variants
double *pool = malloc(20*dim*sizeof(double));
#pragma omp register ([20*dim]pool)
for ( int i = 0; i < rep; i++ ) {
// each repetition starts from a clean pool
memset(pool, 0, 20*dim*sizeof(double));
// dispatch on the configured algorithm: 0-7 plain variants, 10-15 BP
// (buffer-pool) variants, 20-25 ILU-preconditioned BP variants,
// 80-81 experimental IFCG variants
switch ( cg_ver ) {
case 0:
printf("ALG1 PCG\n");
CG_ALG1(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level); //Algorithm 1 original PCG
break;
case 1:
printf("ALG3 Chronopoulos\n");
CG_ALG3(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level); //Algorithm 3 Chronopoulos
break;
case 2:
printf("ALG4 Pipelined\n");
CG_ALG4(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level); //Algorithm 4 Pipelined
break;
case 3:
printf("ALG7 Gropp\n");
CG_ALG7(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level); //Algorithm 7 Gropp
break;
case 4:
printf("ALG4 IFCG\n");
CG_IFCG(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level); //Algorithm 4 IFCG
break;
case 5:
printf("ALG4 IFCG V2\n");
CG_IFCG_V2(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level); //Algorithm 4 IFCG V2
break;
case 6:
printf("ALG4 IFCG CENTINEL\n");
CG_IFCG_Centinel(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level); //Algorithm 4 IFCG Centinel
break;
case 7:
printf("ALG4 IFCG V2 CENTINEL\n");
CG_IFCG_V2_Centinel(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level); //Algorithm 4 IFCG V2 Centinel
break;
//TODO BPs
case 10:
printf("ALG4 IFCG BP\n");
CG_IFCG_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level, pool); //Algorithm 4 IFCG BP
break;
case 11:
printf("ALG4 IFCG V2 BP\n");
CG_IFCG_V2_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level, pool); //Algorithm 4 IFCG V2 BP
break;
case 12:
printf("ALG1 PCG BP\n");
CG_ALG1_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 1 original PCG BP
break;
case 13:
printf("ALG3 Chronopoulos BP\n");
CG_ALG3_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 3 Chronopoulos BP
break;
case 14:
printf("ALG4 Pipelined BP\n");
CG_ALG4_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 4 Pipelined BP
break;
case 15:
printf("ALG7 Gropp BP\n");
CG_ALG7_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 7 Gropp BP
break;
//TODO ILUs
case 20:
printf("ALG4 IFCG ILU BP\n");
CG_IFCG_ILU_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level, pool); //Algorithm 4 IFCG BP
break;
case 21:
printf("ALG4 IFCG V2 ILU BP\n");
CG_IFCG_V2_ILU_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level, pool); //Algorithm 4 IFCG V2 BP
break;
case 22:
printf("ALG1 PCG ILU BP\n");
CG_ALG1_ILU_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 1 original PCG BP
break;
case 23:
printf("ALG3 Chronopoulos ILU BP\n");
CG_ALG3_ILU_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 3 Chronopoulos BP
break;
case 24:
printf("ALG4 Pipelined ILU BP\n");
CG_ALG4_ILU_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 4 Pipelined BP
break;
case 25:
printf("ALG7 Gropp ILU BP\n");
CG_ALG7_ILU_BP(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, orth_fac, cglog_level, pool); //Algorithm 7 Gropp BP
break;
case 80:
printf("ALG4 IFCG Commutative\n"); //Algorithm 4 IFCG Commutative (commutative dot-product)
CG_ALG4_V4(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level);
break;
case 81:
printf("ALG4 IFCG-AT\n"); //Algorithm 4 IFCG-AT
CG_ALG4_AT(Acsr, Ahbh, x, rhs, cgit, bm, prec, correction, iter_fuse, orth_fac, cglog_level);
break;
default:
printf("No algorithm selected\n");
break;
}
}
// NOTE(review): pool is intentionally not freed here — the process exits
// immediately; confirm the OmpSs runtime does not require deregistration
return 0;
}
/* Standard PCG */
int CG_ALG1(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
double *pool = calloc(4 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * p_i
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
double *s = malloc(n * sizeof(double)); // s = Ap
double *alpha1 = calloc(2, sizeof(double));
double *alpha2 = calloc(2, sizeof(double));
double orth;
double porth = DBL_MAX;
double norm_b = cblas_ddot(n, b, 1, b, 1);
double *residuals = malloc(cgit * sizeof(double));
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* p[0] = z[0] */
bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
/* alpha1[0] = <r[0], z[0]> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
int k;
for ( k = 0; k < cgit; k++ ) {
start_timer();
int iprev = i;
i = i ^ 0x1;
/* s = A * p[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[iprev], fp_nought, s);
/* alpha2[i] = <s, p[i]> */
bblas_ddot(1, bm, 1, n, 1, s, p[iprev], &alpha2[i]);
bblas_dcpaxpy_comb(bm, 1, n, 1, fp_mone, &alpha1[iprev], &alpha2[i], s, p[iprev], r[iprev], x[iprev], r[i], x[i]);
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* u[i+1] = M^-1 * r[i+1] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* alpha1[i+1] = <r, u> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
/* p[i+1] = u[i+1] + transpose(beta[i]) * p[i] */
bblas_extm_daxpy(1, bm, 1, n, 1, &alpha1[i], &alpha1[iprev], p[iprev], u[i], p[i]);
#pragma omp taskwait
stop_timer(&elapses[k]);
alpha1[iprev] = alpha2[iprev] = (double) 0;
// BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
// orth = FP_ABS(orth);
// if (isgreater(orth, porth * orth_fac)){
// fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
// break;
// }
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[k] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
// fprintf(stdout, "%d %E\n", k, residuals[k]);
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg1.log", k, residuals, elapses);
free(pool);
free(alpha1);
free(alpha2);
free(residuals);
free(elapses);
return 0;
}
/* Chronopoulos PCG */
/*
 * CG_ALG3: Chronopoulos/Gear-style PCG with a single fused vector-update task
 * per block ("grand fuse"), reducing synchronization vs. CG_ALG1.
 * Parameters match CG_ALG1; ACCIMP must be > 0 (k % ACCIMP is evaluated).
 * Uses global preconditioner factors S, N.  Returns 0.
 */
int CG_ALG3(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
// one pooled allocation for the six double-buffered vectors x,r,u,w,p,s
double *pool = calloc(6 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * s_i
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w = A * u_i+1
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i+1 = w_i+1 + beta * s_i
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
double orth;            // reserved for the disabled p-orthogonality check
double porth = DBL_MAX; // reserved for the disabled p-orthogonality check
// NOTE(review): <b,b> (squared norm), compared against a true ||r|| below
double norm_b = cblas_ddot(n, b, 1, b, 1);
double *residuals = malloc(cgit * sizeof(double));
// NOTE(review): sizeof(double) over-allocates; element type is unsigned int
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[0] = A * u[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
/* gamma[0] = <r[0], u[0]> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
/* alpha[0] = gamma[0]/<w[0], u[0]> */
bblas_ddot(1, bm, 1, n, 1, w[i], u[i], &alpha[i]);
#pragma omp taskwait on (gamma[i], alpha[i])
alpha[i] = gamma[i] / alpha[i];
int k;
for ( k = 0; k < cgit; k++ ) {
start_timer();
int iprev = i;
i = i ^ 0x1; // flip the double-buffer index
/* Grand fuse: p, s, x, r updated in one task per block of size bm */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm; // last block may be short
double *pp0 = &(p[iprev])[j];
double *up0 = &(u[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *xp0 = &(x[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *pp1 = &(p[i])[j];
double *up1 = &(u[i])[j];
double *sp1 = &(s[i])[j];
double *wp1 = &(w[i])[j];
double *xp1 = &(x[i])[j];
double *rp1 = &(r[i])[j];
#pragma omp task out([c]pp1, [c]sp1, [c]xp1, [c]rp1) priority(1) label(alg3_fuse)
{
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up0, 1, pp1, 1);
BLAS_axpy(c, beta[iprev], pp0, 1, pp1, 1);
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp0, 1, sp1, 1);
BLAS_axpy(c, beta[iprev], sp0, 1, sp1, 1);
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[iprev], pp1, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[iprev], sp1, 1, rp1, 1);
}
}
/* Accuracy improvement: explicit residual replacement every ACCIMP iters */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* u[i+1] = M^-1 * r[i+1] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[i+1] = A * u[i+1] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
/* fused dot products: gamma = <r,u>, delta = <w,u> */
cg_ddot2(1, bm, 1, n, 1, r[i], u[i], &gamma[i], w[i], u[i], &delta);
#pragma omp taskwait
/* Chronopoulos recurrences for beta and alpha */
beta[i] = gamma[i] / gamma[iprev];
alpha[i] = gamma[i]/(delta - beta[i] * gamma[i] / alpha[iprev]);
stop_timer(&elapses[k]);
gamma[iprev] = delta = (double) 0;
//TODO Implement p-orthogonality check
#if 0
BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
orth = FP_ABS(orth);
if (isgreater(orth, porth * orth_fac)){
fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
break;
}
#endif
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[k] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
//	fprintf(stdout, "%d %E\n", k, residuals[k]);
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg3.log", k, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(residuals);
free(elapses);
return 0;
}
/* Pipelined PCG */
/*
 * CG_ALG4: Ghysels/Vanroose-style pipelined PCG.  The preconditioner solve
 * and SpMV for the next iteration are issued before the scalar recurrences,
 * and all eight vector updates are fused into one task per block.
 * Parameters match CG_ALG1; ACCIMP must be > 0 (k % ACCIMP is evaluated).
 * Uses global preconditioner factors S, N.  Returns 0.
 */
int CG_ALG4(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
// one pooled allocation for the ten double-buffered vectors
double *pool = calloc(10 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
offs += 2 * n;
double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
offs += 2 * n;
double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
offs += 2 * n;
double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
offs += 2 * n;
double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
double orth;            // reserved for the disabled p-orthogonality check
double porth = DBL_MAX; // reserved for the disabled p-orthogonality check
// NOTE(review): <b,b> (squared norm), compared against a true ||r|| below
double norm_b = cblas_ddot(n, b, 1, b, 1);
double *residuals = malloc(cgit * sizeof(double));
// NOTE(review): sizeof(double) over-allocates; element type is unsigned int
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[0] = A * u[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
int k;
for ( k = 0; k < cgit; k++ ) {
start_timer();
int iprev = i;
i = i ^ 0x1; // flip the double-buffer index
/* m[i] = M^-1 * w[i] */
bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
/* n[i] = A * m[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
/*
 * gamma[i] = <r[i], u[i]>
 * delta = <w[i], u[i]>
 */
cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
#pragma omp taskwait on(gamma[i], delta)
/* pipelined recurrences; first iteration has no previous gamma/alpha */
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
beta[i] = (double) 0;
alpha[i] = gamma[i]/delta;
}
gamma[iprev] = delta = 0;
/* Grand fuse: z,q,s,p,x,r,u,w updated in one task per block of size bm */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm; // last block may be short
double *zp0 = &(z[iprev])[j];
double *qp0 = &(q[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *xp0 = &(x[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *zp1 = &(z[i])[j];
double *qp1 = &(q[i])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
double *xp1 = &(x[i])[j];
double *rp1 = &(r[i])[j];
double *up1 = &(u[i])[j];
double *wp1 = &(w[i])[j];
double *mp1 = &(m[i])[j];
double *np1 = &(n0[i])[j];
#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
priority(1) label(alg4_fuse)
{
/* z_i = n_i + beta_i * z_i-1 */
BLAS_cp(c, np1, 1, zp1, 1);
BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
/* q_i = m_i + beta_i * q_i-1 */
BLAS_cp(c, mp1, 1, qp1, 1);
BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp0, 1, sp1, 1);
BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up0, 1, pp1, 1);
BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
/* u_i+1 = u_i - alpha_i * q_i */
BLAS_cp(c, up0, 1, up1, 1);
BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
/* w_i+1 = w_i - alpha_i * z_i */
BLAS_cp(c, wp0, 1, wp1, 1);
BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
}
}
/* Accuracy improvement: explicit residual replacement every ACCIMP iters */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
#pragma omp taskwait
//		gamma[iprev] = delta = 0;
stop_timer(&elapses[k]);
//TODO Implement p-orthogonality check
#if 0
BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
orth = FP_ABS(orth);
if (isgreater(orth, porth * orth_fac)){
fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
break;
}
#endif
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[k] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
//	fprintf(stdout, "%d %E\n", k, residuals[k]);
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg4.log", k, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(residuals);
free(elapses);
return 0;
}
/* Gropp PCG */
/*
 * CG_ALG7: Gropp's overlapped preconditioned conjugate gradient.
 *
 * Solves A * solution = b for the sparse symmetric system given in A (an
 * hbmat_t*; only A->m is read here) / Ahbh (handle consumed by the
 * hbsblas_dcsrmv SpMV kernel). All vector work is chunked into blocks of
 * bm rows and expressed as tasks so the preconditioner solve (q = M^-1 s)
 * and the SpMV (w = A u) overlap with the dot-product reductions.
 *
 * Parameters:
 *   solution    - out: n-vector receiving the final iterate x
 *   b           - right-hand side n-vector
 *   cgit        - maximum iteration count
 *   bm          - row-block size for the task-based vector kernels
 *   prec        - convergence threshold on the residual measure below
 *   ACCIMP      - every ACCIMP-th iteration recomputes r = b - A*x to
 *                 limit floating-point drift ("accuracy improvement")
 *   orth_fac    - only referenced by the disabled (#if 0) p-orthogonality check
 *   cglog_level - nonzero: dump residuals/timings to cg_alg7.log
 * Always returns 0.
 *
 * NOTE(review): norm_b below is <b,b> (the *squared* 2-norm) while norm_r
 * takes a sqrt, so the stopping test compares ||r|| / ||b||^2 against prec;
 * the same asymmetry appears in every solver in this file -- confirm it is
 * the intended convergence metric.
 */
int CG_ALG7(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
/* One pool backs 7 double-buffered (index i / iprev) n-vectors: x,u,w,p,q,r,s. */
double *pool = calloc(7 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
offs += 2 * n;
double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
double orth;
double porth = DBL_MAX;
double norm_b = cblas_ddot(n, b, 1, b, 1);
double *residuals = malloc(cgit * sizeof(double));
/* NOTE(review): elapses holds unsigned int but is sized with sizeof(double);
 * harmless over-allocation, but should be sizeof *elapses. */
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* p[0] = u[0] */
bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
/* s[0] = A * p[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[i], fp_nought, s[i]);
/* gamma[0] = <r[0], u[0]> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
int k;
for ( k = 0; k < cgit; k++ ) {
start_timer();
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* delta = <p[i], s[i]> */
bblas_ddot(1, bm, 1, n, 1, p[iprev], s[iprev], &delta);
/* q[i] = M^-1 * s[i] */
bsblas_dcholsolv2(1, bm, n, S, N, s[iprev], q[i]);
#pragma omp taskwait on(delta)
alpha[i] = gamma[iprev]/delta;
/* Axpy fuse x,r,u */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *xp0 = &(x[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *up0 = &(u[iprev])[j];
double *qp0 = &(q[iprev])[j];
double *xp1 = &(x[i])[j];
double *pp1 = &(p[i])[j];
double *rp1 = &(r[i])[j];
double *sp1 = &(s[i])[j];
double *up1 = &(u[i])[j];
double *qp1 = &(q[i])[j];
#pragma omp task in([c]qp1) out([c]xp1, [c]rp1, [c]up1) priority(1) label(alg7_fuse0)
{
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[i], pp0, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[i], sp0, 1, rp1, 1);
/* u_i+1 = u_i - alpha_i * q_i */
BLAS_cp(c, up0, 1, up1, 1);
BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
}
}
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* gamma[i+1] = <r[i+1], u[i+1]> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
/* w[i+1] = A * u[i+1] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
#pragma omp taskwait on(gamma[i])
beta[i] = gamma[i]/gamma[iprev];
/* Axpy fuse p,s */
/* NOTE(review): the alg7_fuse1 task below writes pp1/sp1 but declares no
 * out() dependences for them; downstream readers of p[i]/s[i] are ordered
 * only by the unconditional taskwait at the end of the iteration -- confirm
 * this is intentional. */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *pp0 = &(p[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *pp1 = &(p[i])[j];
double *sp1 = &(s[i])[j];
double *up1 = &(u[i])[j];
double *wp1 = &(w[i])[j];
#pragma omp task in([c]up1, [c]pp0, [c]wp1, [c]sp0) priority(1) label(alg7_fuse1)
{
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up1, 1, pp1, 1);
BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp1, 1, sp1, 1);
BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
}
}
#pragma omp taskwait
gamma[iprev] = delta = 0;
stop_timer(&elapses[k]);
//TODO Implement p-orthogonality check
#if 0
BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
orth = FP_ABS(orth);
if (isgreater(orth, porth * orth_fac)){
fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
break;
}
#endif
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[k] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
// fprintf(stdout, "%d %E\n", k, residuals[k]);
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg7.log", k, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(residuals);
free(elapses);
return 0;
}
/* IFCG */
/*
 * CG_IFCG: iteration-fusing preconditioned conjugate gradient.
 *
 * Same task-blocked machinery as CG_ALG7, but all eight vector updates of
 * an iteration are fused into a single "grand fuse" task per row block, and
 * the convergence test / timer only synchronizes every `fuse` iterations
 * (index kk counts those checkpoints). The scalar recurrences follow
 * Chronopoulos/Gear-style CG: alpha is derived from gamma = <r,u> and
 * delta = <w,u> so only one reduction pair per iteration is needed.
 *
 * Parameters mirror CG_ALG7 plus:
 *   fuse - number of iterations executed between global taskwaits /
 *          residual checks.
 * Always returns 0.
 *
 * NOTE(review): norm_b is <b,b> (squared norm) while norm_r takes a sqrt,
 * so the stopping test uses ||r|| / ||b||^2 -- confirm intended.
 */
int CG_IFCG(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
/* One pool backs 10 double-buffered n-vectors: x,u,w,p,m,n0,z,q,r,s. */
double *pool = calloc(10 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
offs += 2 * n;
double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
offs += 2 * n;
double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
offs += 2 * n;
double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
offs += 2 * n;
double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
double orth;
double porth = DBL_MAX;
double norm_b = cblas_ddot(n, b, 1, b, 1);
/* NOTE(review): elapses holds unsigned int but is sized with sizeof(double);
 * harmless over-allocation, but should be sizeof *elapses. */
double *residuals = malloc(cgit * sizeof(double));
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[0] = A * u[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
start_timer();
int kk = 0;
int k;
for ( k = 0; k < cgit; k++) {
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* m[i] = M^-1 * w[i] */
bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
/* n[i] = A * m[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
/*
 * gamma[i] = <r[i], u[i]>
 * delta = <w[i], u[i]>
 */
cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
#pragma omp taskwait on(gamma[i], delta)
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
beta[i] = (double) 0;
alpha[i] = gamma[i]/delta;
}
gamma[iprev] = delta = 0;
/* Grand fuse */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *zp0 = &(z[iprev])[j];
double *qp0 = &(q[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *xp0 = &(x[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *zp1 = &(z[i])[j];
double *qp1 = &(q[i])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
double *xp1 = &(x[i])[j];
double *rp1 = &(r[i])[j];
double *up1 = &(u[i])[j];
double *wp1 = &(w[i])[j];
double *mp1 = &(m[i])[j];
double *np1 = &(n0[i])[j];
#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
priority(1) label(alg4_fuse)
{
/* z_i = n_i + beta_i * z_i-1 */
BLAS_cp(c, np1, 1, zp1, 1);
BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
/* q_i = m_i + beta_i * q_i-1 */
BLAS_cp(c, mp1, 1, qp1, 1);
BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp0, 1, sp1, 1);
BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up0, 1, pp1, 1);
BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
/* u_i+1 = u_i - alpha_i * q_i */
BLAS_cp(c, up0, 1, up1, 1);
BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
/* w_i+1 = w_i - alpha_i * z_i */
BLAS_cp(c, wp0, 1, wp1, 1);
BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
}
}
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* Convergence / timing checkpoint: only every `fuse` iterations. */
if ( k > 0 && k % fuse == 0 ) {
#pragma omp taskwait
stop_timer(&elapses[kk]);
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[kk] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
kk += 1;
start_timer();
}
}
#pragma omp taskwait
/* Loop exhausted without hitting prec: record the final residual too. */
if ( k == cgit ) {
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
stop_timer(&elapses[kk]);
residuals[kk] = sr2norm;
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg4_ifcg.log", kk, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(residuals);
free(elapses);
return 0;
}
/* IFCG Centinel */
/*
 * CG_IFCG_Centinel: CG_IFCG variant in which the scalar alpha/beta update
 * runs as its own "centinel" (sentinel) task instead of a blocking
 * taskwait, so the grand-fuse tasks of the next step can be created while
 * the reductions are still in flight; ordering is enforced by the
 * alpha[i]/beta[i] dependences on the fuse tasks.
 *
 * Parameters and return value are identical to CG_IFCG.
 *
 * NOTE(review): norm_b is <b,b> (squared norm) while norm_r takes a sqrt,
 * so the stopping test uses ||r|| / ||b||^2 -- confirm intended.
 */
int CG_IFCG_Centinel(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
/* One pool backs 10 double-buffered n-vectors: x,u,w,p,m,n0,z,q,r,s. */
double *pool = calloc(10 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
offs += 2 * n;
double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
offs += 2 * n;
double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
offs += 2 * n;
double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
offs += 2 * n;
double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
int bs = (n+bm-1)/bm;
/* a_gamma/a_delta: per-block partial sums, only consumed by the disabled
 * (#if 0) array-reduction path below. */
double *a_gamma = malloc(bs * sizeof(double));
double *a_delta = malloc(bs * sizeof(double));
double orth;
double porth = DBL_MAX;
double norm_b = cblas_ddot(n, b, 1, b, 1);
/* NOTE(review): elapses holds unsigned int but is sized with sizeof(double);
 * harmless over-allocation, but should be sizeof *elapses. */
double *residuals = malloc(cgit * sizeof(double));
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[0] = A * u[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
start_timer();
int kk = 0;
int k;
for ( k = 0; k < cgit; k++) {
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* m[i] = M^-1 * w[i] */
bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
/* n[i] = A * m[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
/*
 * gamma[i] = <r[i], u[i]>
 * delta = <w[i], u[i]>
 */
// cg_ddot2_array(1, bm, 1, n, 1, r[iprev], u[iprev], a_gamma, w[iprev], u[iprev], a_delta);
cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
/* Sentinel task: compute the step scalars without blocking task creation. */
#pragma omp task in(gamma[i], delta) out(beta[i], alpha[i]) label(centinel)
{
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
beta[i] = (double) 0;
alpha[i] = gamma[i]/delta;
}
gamma[iprev] = delta = 0;
}
#if 0
#pragma omp task in(a_gamma[0:bs-1], a_delta[0:bs-1]) out(beta[i], alpha[i]) label(centinel)
{
gamma[i] = (double) 0;
delta = (double) 0;
for (int ii = 0; ii < bs; ii++) {
gamma[i] += a_gamma[ii];
delta += a_delta[ii];
}
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
beta[i] = (double) 0;
alpha[i] = gamma[i]/delta;
}
}
#endif
/* Grand fuse */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *zp0 = &(z[iprev])[j];
double *qp0 = &(q[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *xp0 = &(x[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *zp1 = &(z[i])[j];
double *qp1 = &(q[i])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
double *xp1 = &(x[i])[j];
double *rp1 = &(r[i])[j];
double *up1 = &(u[i])[j];
double *wp1 = &(w[i])[j];
double *mp1 = &(m[i])[j];
double *np1 = &(n0[i])[j];
#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
in(alpha[i], beta[i], [c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
priority(1) label(alg4_fuse)
{
/* z_i = n_i + beta_i * z_i-1 */
BLAS_cp(c, np1, 1, zp1, 1);
BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
/* q_i = m_i + beta_i * q_i-1 */
BLAS_cp(c, mp1, 1, qp1, 1);
BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp0, 1, sp1, 1);
BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up0, 1, pp1, 1);
BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
/* u_i+1 = u_i - alpha_i * q_i */
BLAS_cp(c, up0, 1, up1, 1);
BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
/* w_i+1 = w_i - alpha_i * z_i */
BLAS_cp(c, wp0, 1, wp1, 1);
BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
}
}
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* Convergence / timing checkpoint: only every `fuse` iterations. */
if ( k > 0 && k % fuse == 0 ) {
#pragma omp taskwait
stop_timer(&elapses[kk]);
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[kk] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
kk += 1;
start_timer();
}
}
#pragma omp taskwait
/* Loop exhausted without hitting prec: record the final residual too. */
if ( k == cgit ) {
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
stop_timer(&elapses[kk]);
residuals[kk] = sr2norm;
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg4_ifcg_centinel.log", kk, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(a_gamma);
free(a_delta);
free(residuals);
free(elapses);
return 0;
}
/* IFCG V2 */
/*
 * CG_IFCG_V2: variant of CG_IFCG that splits the two reductions so work can
 * be interleaved between them: gamma = <r,u> and delta = <w,u> are issued
 * separately, beta is computed as soon as gamma is ready (allowing the s/p
 * recurrence tasks to start), and alpha waits only on delta while the SpMV
 * n[i] = A*m[i] proceeds in parallel.
 *
 * Parameters and return value are identical to CG_IFCG.
 *
 * NOTE(review): norm_b is <b,b> (squared norm) while norm_r takes a sqrt,
 * so the stopping test uses ||r|| / ||b||^2 -- confirm intended.
 */
int CG_IFCG_V2(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
/* One pool backs 10 double-buffered n-vectors: x,u,w,p,m,n0,z,q,r,s. */
double *pool = calloc(10 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
offs += 2 * n;
double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
offs += 2 * n;
double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
offs += 2 * n;
double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
offs += 2 * n;
double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
double orth;
double porth = DBL_MAX;
double norm_b = cblas_ddot(n, b, 1, b, 1);
/* NOTE(review): elapses holds unsigned int but is sized with sizeof(double);
 * harmless over-allocation, but should be sizeof *elapses. */
double *residuals = malloc(cgit * sizeof(double));
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[0] = A * u[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
start_timer();
int kk = 0;
int k;
for ( k = 0; k < cgit; k++) {
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* m[i] = M^-1 * w[i] */
bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
/*
 * gamma[i] = <r[i], u[i]>
 */
bblas_ddot(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i]);
bblas_ddot(1, bm, 1, n, 1, w[iprev], u[iprev], &delta);
#pragma omp taskwait on(gamma[i])
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
} else {
beta[i] = (double) 0;
}
/*
 * s_i = w_i + beta_i * s_i-1
 * p_i = u_i + beta_i * p_i-1
 */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
#pragma omp task out([c]sp1, [c]pp1) in(beta[i], [c]sp0, [c]pp0, [c]up0, [c]wp0) priority(1) label(alg4_apxy)
{
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp0, 1, sp1, 1);
BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up0, 1, pp1, 1);
BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
}
}
/* n[i] = A * m[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
#pragma omp taskwait on(delta)
if ( k > 0 ) {
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
alpha[i] = gamma[i]/delta;
}
gamma[iprev] = delta = 0;
/* Grand fuse */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *zp0 = &(z[iprev])[j];
double *qp0 = &(q[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *xp0 = &(x[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *zp1 = &(z[i])[j];
double *qp1 = &(q[i])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
double *xp1 = &(x[i])[j];
double *rp1 = &(r[i])[j];
double *up1 = &(u[i])[j];
double *wp1 = &(w[i])[j];
double *mp1 = &(m[i])[j];
double *np1 = &(n0[i])[j];
#pragma omp task out([c]zp1, [c]qp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
in(alpha[i], [c]zp0, [c]qp0, [c]sp1, [c]pp1, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
priority(1) label(alg4_fuse)
{
/* z_i = n_i + beta_i * z_i-1 */
BLAS_cp(c, np1, 1, zp1, 1);
BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
/* q_i = m_i + beta_i * q_i-1 */
BLAS_cp(c, mp1, 1, qp1, 1);
BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
/* u_i+1 = u_i - alpha_i * q_i */
BLAS_cp(c, up0, 1, up1, 1);
BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
/* w_i+1 = w_i - alpha_i * z_i */
BLAS_cp(c, wp0, 1, wp1, 1);
BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
}
}
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* Convergence / timing checkpoint: only every `fuse` iterations. */
if ( k > 0 && k % fuse == 0 ) {
#pragma omp taskwait
stop_timer(&elapses[kk]);
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[kk] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
kk += 1;
start_timer();
}
}
#pragma omp taskwait
/* Loop exhausted without hitting prec: record the final residual too. */
if ( k == cgit ) {
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
stop_timer(&elapses[kk]);
residuals[kk] = sr2norm;
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg4_ifcg_v2.log", kk, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(residuals);
free(elapses);
return 0;
}
/* IFCG V2 Centinel */
/*
 * CG_IFCG_V2_Centinel: CG_IFCG_V2 with both scalar updates turned into
 * non-blocking sentinel tasks (sentinel0 computes beta from gamma,
 * sentinel1 computes alpha from delta and beta), so no taskwait sits on
 * the critical path of task creation; the fuse tasks depend on alpha[i]
 * and beta[i] to stay ordered.
 *
 * Parameters and return value are identical to CG_IFCG.
 *
 * NOTE(review): norm_b is <b,b> (squared norm) while norm_r takes a sqrt,
 * so the stopping test uses ||r|| / ||b||^2 -- confirm intended.
 */
int CG_IFCG_V2_Centinel(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
/* One pool backs 10 double-buffered n-vectors: x,u,w,p,m,n0,z,q,r,s. */
double *pool = calloc(10 * 2 * n, sizeof(double));
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
offs += 2 * n;
double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
offs += 2 * n;
double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
offs += 2 * n;
double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
offs += 2 * n;
double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
offs += 2 * n;
double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
offs += 2 * n;
double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
double *alpha = calloc(2, sizeof(double));
double *beta = calloc(2, sizeof(double));
double *gamma = calloc(2, sizeof(double));
double delta = (double) 0;
int bs = (n+bm-1)/bm;
/* a_gamma/a_delta: per-block partial sums, only consumed by the disabled
 * (#if 0) array-reduction paths below. */
double *a_gamma = malloc(bs * sizeof(double));
double *a_delta = malloc(bs * sizeof(double));
double orth;
double porth = DBL_MAX;
double norm_b = cblas_ddot(n, b, 1, b, 1);
/* NOTE(review): elapses holds unsigned int but is sized with sizeof(double);
 * harmless over-allocation, but should be sizeof *elapses. */
double *residuals = malloc(cgit * sizeof(double));
unsigned int *elapses = malloc(cgit * sizeof(double));
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* w[0] = A * u[0] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
start_timer();
int kk = 0;
int k;
for ( k = 0; k < cgit; k++) {
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* m[i] = M^-1 * w[i] */
bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
/*
 * gamma[i] = <r[i], u[i]>
 */
bblas_ddot(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i]);
bblas_ddot(1, bm, 1, n, 1, w[iprev], u[iprev], &delta);
/* Sentinel task: beta becomes available without a blocking taskwait. */
#pragma omp task in(gamma[i]) out(beta[i]) no_copy_deps label(sentinel0)
{
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
} else {
beta[i] = (double) 0;
}
}
#if 0
bblas_ddot_array(1, bm, 1, n, 1, r[iprev], u[iprev], a_gamma);
bblas_ddot_array(1, bm, 1, n, 1, w[iprev], u[iprev], a_delta);
#pragma omp task in(a_gamma[0:bs-1]) out(beta[i]) no_copy_deps label(sentinel0)
{
gamma[i] = (double) 0;
for (int ii = 0; ii < bs; ii++) {
gamma[i] += a_gamma[ii];
}
if ( k > 0 ) {
beta[i] = gamma[i]/gamma[iprev];
} else {
beta[i] = (double) 0;
}
}
#endif
/*
 * s_i = w_i + beta_i * s_i-1
 * p_i = u_i + beta_i * p_i-1
 */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
#pragma omp task out([c]sp1, [c]pp1) in(beta[i], [c]sp0, [c]pp0, [c]up0, [c]wp0) priority(1) label(alg4_apxy)
{
/* s_i = w_i + beta_i * s_i-1 */
BLAS_cp(c, wp0, 1, sp1, 1);
BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
/* p_i = u_i + beta_i * p_i-1 */
BLAS_cp(c, up0, 1, pp1, 1);
BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
}
}
/* n[i] = A * m[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
/* Sentinel task: alpha depends on delta and the beta sentinel above. */
#pragma omp task in(beta[i], delta) out(alpha[i]) label(sentinel1) no_copy_deps
{
if ( k > 0 ) {
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
alpha[i] = gamma[i]/delta;
}
gamma[iprev] = delta = 0;
}
#if 0
#pragma omp task in(beta[i], a_delta[0:bs-1]) out(alpha[i]) label(sentinel1) no_copy_deps
{
delta = (double) 0;
for (int ii= 0; ii < bs; ii++) {
delta += a_delta[ii];
}
if ( k > 0 ) {
alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
} else {
alpha[i] = gamma[i]/delta;
}
}
#endif
/* Grand fuse */
for (int j = 0; j < n; j += bm ) {
int cs = n - j;
int c = cs < bm ? cs : bm;
double *zp0 = &(z[iprev])[j];
double *qp0 = &(q[iprev])[j];
double *sp0 = &(s[iprev])[j];
double *pp0 = &(p[iprev])[j];
double *xp0 = &(x[iprev])[j];
double *rp0 = &(r[iprev])[j];
double *up0 = &(u[iprev])[j];
double *wp0 = &(w[iprev])[j];
double *zp1 = &(z[i])[j];
double *qp1 = &(q[i])[j];
double *sp1 = &(s[i])[j];
double *pp1 = &(p[i])[j];
double *xp1 = &(x[i])[j];
double *rp1 = &(r[i])[j];
double *up1 = &(u[i])[j];
double *wp1 = &(w[i])[j];
double *mp1 = &(m[i])[j];
double *np1 = &(n0[i])[j];
#pragma omp task out([c]zp1, [c]qp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
in(alpha[i], [c]zp0, [c]qp0, [c]sp1, [c]pp1, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
priority(1) label(alg4_fuse)
{
/* z_i = n_i + beta_i * z_i-1 */
BLAS_cp(c, np1, 1, zp1, 1);
BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
/* q_i = m_i + beta_i * q_i-1 */
BLAS_cp(c, mp1, 1, qp1, 1);
BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
/* x_i+1 = x_i + alpha_i * p_i */
BLAS_cp(c, xp0, 1, xp1, 1);
BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
/* r_i+1 = r_i - alpha_i * s_i */
BLAS_cp(c, rp0, 1, rp1, 1);
BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
/* u_i+1 = u_i - alpha_i * q_i */
BLAS_cp(c, up0, 1, up1, 1);
BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
/* w_i+1 = w_i - alpha_i * z_i */
BLAS_cp(c, wp0, 1, wp1, 1);
BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
}
}
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* Convergence / timing checkpoint: only every `fuse` iterations. */
if ( k > 0 && k % fuse == 0 ) {
#pragma omp taskwait
stop_timer(&elapses[kk]);
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[kk] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
kk += 1;
start_timer();
}
}
#pragma omp taskwait
/* Loop exhausted without hitting prec: record the final residual too. */
if ( k == cgit ) {
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
stop_timer(&elapses[kk]);
residuals[kk] = sr2norm;
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg4_ifcg_v2_centinel.log", kk, residuals, elapses);
free(pool);
free(alpha);
free(beta);
free(gamma);
free(a_gamma);
free(a_delta);
free(residuals);
free(elapses);
return 0;
}
/* Iteration-fusing Standard PCG */
/*
 * CG_IFCG_PCG: standard preconditioned conjugate gradient expressed with
 * the task-blocked BLAS kernels (row blocks of bm), double-buffering the
 * x, r, u, p vectors between iterations. Solves A * solution = b for the
 * sparse symmetric system in A (hbmat_t*; only A->m is read) / Ahbh
 * (handle consumed by hbsblas_dcsrmv).
 *
 * Parameters:
 *   solution    - out: n-vector receiving the final iterate x
 *   b           - right-hand side n-vector
 *   cgit        - maximum iteration count
 *   bm          - row-block size for the task-based vector kernels
 *   prec        - convergence threshold on the residual measure below
 *   ACCIMP      - every ACCIMP-th iteration recomputes r = b - A*x
 *   fuse        - NOTE(review): unused in this variant; the residual is
 *                 checked every iteration (kept for signature parity with
 *                 the other IFCG solvers)
 *   orth_fac    - only referenced by the commented-out orthogonality check
 *   cglog_level - nonzero: dump residuals/timings to cg_alg1.log
 * Returns 0 on completion, -1 if a workspace allocation fails.
 */
int CG_IFCG_PCG(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
/* Allocate all workspaces first and fail early; free(NULL) is a no-op,
 * and the pool aliases below are only formed once pool is known non-NULL
 * (pointer arithmetic on a null pointer is undefined behavior). */
double *pool = calloc(4 * 2 * n, sizeof(double));
double *s = malloc(n * sizeof *s); // s = Ap
double *alpha1 = calloc(2, sizeof *alpha1);
double *alpha2 = calloc(2, sizeof *alpha2);
double *residuals = malloc(cgit * sizeof *residuals);
/* Fixed: was malloc(cgit * sizeof(double)) for an unsigned int array. */
unsigned int *elapses = malloc(cgit * sizeof *elapses);
if ( pool == NULL || s == NULL || alpha1 == NULL || alpha2 == NULL || residuals == NULL || elapses == NULL ) {
free(pool);
free(s);
free(alpha1);
free(alpha2);
free(residuals);
free(elapses);
return -1;
}
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * p_i
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
/* NOTE(review): norm_b is <b,b> (squared norm) while norm_r below takes a
 * sqrt, so the stopping test uses ||r|| / ||b||^2 -- confirm intended;
 * this matches the other solvers in this file. */
double norm_b = cblas_ddot(n, b, 1, b, 1);
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* p[0] = z[0] */
bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
/* alpha1[0] = <r[0], z[0]> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
int k;
for ( k = 0; k < cgit; k++ ) {
start_timer();
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* s = A * p[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[iprev], fp_nought, s);
/* alpha2[i] = <s, p[i]> */
bblas_ddot(1, bm, 1, n, 1, s, p[iprev], &alpha2[i]);
/* Combined update: x += (alpha1/alpha2) p, r -= (alpha1/alpha2) s. */
bblas_dcpaxpy_comb(bm, 1, n, 1, fp_mone, &alpha1[iprev], &alpha2[i], s, p[iprev], r[iprev], x[iprev], r[i], x[i]);
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* u[i+1] = M^-1 * r[i+1] */
bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
/* alpha1[i+1] = <r, u> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
/* p[i+1] = u[i+1] + transpose(beta[i]) * p[i] */
bblas_extm_daxpy(1, bm, 1, n, 1, &alpha1[i], &alpha1[iprev], p[iprev], u[i], p[i]);
#pragma omp taskwait
stop_timer(&elapses[k]);
alpha1[iprev] = alpha2[iprev] = (double) 0;
// BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
// orth = FP_ABS(orth);
// if (isgreater(orth, porth * orth_fac)){
// fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
// break;
// }
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[k] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
// fprintf(stdout, "%d %E\n", k, residuals[k]);
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg1.log", k, residuals, elapses);
free(pool);
free(alpha1);
free(alpha2);
free(s);
free(residuals);
free(elapses);
return 0;
}
/* TODO BPs */
/*
 * CG_ALG1_BP: standard preconditioned conjugate gradient ("alg1") using a
 * caller-provided, pre-registered buffer pool for the 4 double-buffered
 * n-vectors (x, r, u, p), and the nested-task preconditioner solve
 * dcholsolv2_nested. Solves A * solution = b for the sparse symmetric
 * system in A (hbmat_t*; only A->m is read) / Ahbh.
 *
 * Parameters:
 *   solution    - out: n-vector receiving the final iterate x
 *   b           - right-hand side n-vector
 *   cgit        - maximum iteration count
 *   bm          - row-block size for the task-based vector kernels
 *   prec        - convergence threshold on the residual measure below
 *   ACCIMP      - every ACCIMP-th iteration recomputes r = b - A*x
 *   orth_fac    - only referenced by the commented-out orthogonality check
 *   cglog_level - nonzero: dump residuals/timings to cg_alg1_bp.log
 *   pool        - caller-owned workspace of at least 4*2*n doubles; this
 *                 function does NOT free it
 * Returns 0 on completion, -1 if a workspace allocation fails.
 */
int CG_ALG1_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
hbmat_t *Ahb = (hbmat_t*) A;
int n = Ahb->m;
int offs = 0;
// double *pool = calloc(4 * 2 * n, sizeof(double));
// #pragma omp register ([4*2*n]pool)
double *x[2] = {&pool[offs], &pool[offs+n]};
offs += 2 * n;
double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * p_i
offs += 2 * n;
double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
offs += 2 * n;
double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
/* Allocate the local workspaces and fail early; free(NULL) is a no-op. */
double *s = malloc(n * sizeof *s); // s = Ap
double *alpha1 = calloc(2, sizeof *alpha1);
double *alpha2 = calloc(2, sizeof *alpha2);
double *residuals = malloc(cgit * sizeof *residuals);
/* Fixed: was malloc(cgit * sizeof(double)) for an unsigned int array. */
unsigned int *elapses = malloc(cgit * sizeof *elapses);
if ( s == NULL || alpha1 == NULL || alpha2 == NULL || residuals == NULL || elapses == NULL ) {
free(s);
free(alpha1);
free(alpha2);
free(residuals);
free(elapses);
return -1;
}
/* NOTE(review): norm_b is <b,b> (squared norm) while norm_r below takes a
 * sqrt, so the stopping test uses ||r|| / ||b||^2 -- confirm intended;
 * this matches the other solvers in this file. */
double norm_b = cblas_ddot(n, b, 1, b, 1);
int i = 0;
/* r[0] = b */
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
/* r[0] = b - A * x[0] */
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
/* u[0] = M^-1 * r[0] */
dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
/* p[0] = z[0] */
bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
/* alpha1[0] = <r[0], z[0]> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
int k;
for ( k = 0; k < cgit; k++ ) {
start_timer();
/* Flip the double buffers: iprev holds iteration k's data, i receives k+1's. */
int iprev = i;
i = i ^ 0x1;
/* s = A * p[i] */
hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[iprev], fp_nought, s);
/* alpha2[i] = <s, p[i]> */
bblas_ddot(1, bm, 1, n, 1, s, p[iprev], &alpha2[i]);
/* Combined update: x += (alpha1/alpha2) p, r -= (alpha1/alpha2) s. */
bblas_dcpaxpy_comb(bm, 1, n, 1, fp_mone, &alpha1[iprev], &alpha2[i], s, p[iprev], r[iprev], x[iprev], r[i], x[i]);
/* Accuracy improvement */
if ( k > 0 && k % ACCIMP == 0 ) {
#pragma omp taskwait
bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
}
/* u[i+1] = M^-1 * r[i+1] */
// bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
/* alpha1[i+1] = <r, u> */
bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
/* p[i+1] = u[i+1] + transpose(beta[i]) * p[i] */
bblas_extm_daxpy(1, bm, 1, n, 1, &alpha1[i], &alpha1[iprev], p[iprev], u[i], p[i]);
#pragma omp taskwait
stop_timer(&elapses[k]);
alpha1[iprev] = alpha2[iprev] = (double) 0;
// BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
// orth = FP_ABS(orth);
// if (isgreater(orth, porth * orth_fac)){
// fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
// break;
// }
double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
double sr2norm = norm_r/norm_b;
residuals[k] = sr2norm;
if ( isless(sr2norm, prec) ) {
fprintf(stderr, "Precision reached\n");
break;
}
// fprintf(stdout, "%d %E\n", k, residuals[k]);
}
memcpy(solution, x[i], n * sizeof(double));
if ( cglog_level )
dump_info("cg_alg1_bp.log", k, residuals, elapses);
// free(pool);
free(s);
free(alpha1);
free(alpha2);
free(residuals);
free(elapses);
return 0;
}
/* Chronopoulos PCG */
/*
 * CG_ALG3_BP: Chronopoulos/Gear preconditioned CG, block-parallel version.
 *
 * The recurrence updates for p, s, x and r are fused into one task per
 * block of size bm ("grand fuse"), double-buffering every vector via the
 * ping-pong index i / iprev (i = i ^ 0x1 each iteration).
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the fused axpy tasks
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x (residual replacement)
 *  orth_fac    factor for the p-orthogonality check (currently disabled, see #if 0)
 *  cglog_level when non-zero, dump residual/timing log to cg_alg3_bp.log
 *  pool        caller-provided workspace of at least 6*2*n doubles
 *
 * The preconditioner factors S, N are file/global scope.  Returns 0.
 */
int CG_ALG3_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 6 double-buffered vectors out of the caller's pool (6*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * s_i
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
    offs += 2 * n;
    double *w[2] = {&pool[offs], &pool[offs+n]}; // w = A * u_i+1
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
    offs += 2 * n;
    double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i+1 = w_i+1 + beta * s_i
    double *alpha = calloc(2, sizeof(double));
    double *beta = calloc(2, sizeof(double));
    double *gamma = calloc(2, sizeof(double));
    double delta = (double) 0;
    double orth;                 /* only used by the disabled orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
    /* w[0] = A * u[0] */
    hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
    /* gamma[0] = <r[0], u[0]> */
    bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
    /* alpha[0] = gamma[0]/<w[0], u[0]> */
    bblas_ddot(1, bm, 1, n, 1, w[i], u[i], &alpha[i]);
    #pragma omp taskwait on (gamma[i], alpha[i])
    alpha[i] = gamma[i] / alpha[i];
    int k;
    for ( k = 0; k < cgit; k++ ) {
        start_timer();
        int iprev = i;
        i = i ^ 0x1;
        /* Grand fuse: per-block task updating p, s, x and r in one pass */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *pp0 = &(p[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *xp0 = &(x[iprev])[j];
            double *rp0 = &(r[iprev])[j];
            double *pp1 = &(p[i])[j];
            double *up1 = &(u[i])[j];
            double *sp1 = &(s[i])[j];
            double *wp1 = &(w[i])[j];
            double *xp1 = &(x[i])[j];
            double *rp1 = &(r[i])[j];
            #pragma omp task out([c]pp1, [c]sp1, [c]xp1, [c]rp1) priority(1) label(alg3_fuse)
            {
                /* p_i = u_i + beta_i * p_i-1 */
                BLAS_cp(c, up0, 1, pp1, 1);
                BLAS_axpy(c, beta[iprev], pp0, 1, pp1, 1);
                /* s_i = w_i + beta_i * s_i-1 */
                BLAS_cp(c, wp0, 1, sp1, 1);
                BLAS_axpy(c, beta[iprev], sp0, 1, sp1, 1);
                /* x_i+1 = x_i + alpha_i * p_i */
                BLAS_cp(c, xp0, 1, xp1, 1);
                BLAS_axpy(c, alpha[iprev], pp1, 1, xp1, 1);
                /* r_i+1 = r_i - alpha_i * s_i */
                BLAS_cp(c, rp0, 1, rp1, 1);
                BLAS_axpy(c, -1*alpha[iprev], sp1, 1, rp1, 1);
            }
        }
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        /* u[i+1] = M^-1 * r[i+1] */
        dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
        /* w[i+1] = A * u[i+1] */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
        /* gamma[i] = <r,u>, delta = <w,u> in one fused reduction */
        cg_ddot2(1, bm, 1, n, 1, r[i], u[i], &gamma[i], w[i], u[i], &delta);
        #pragma omp taskwait
        beta[i] = gamma[i] / gamma[iprev];
        alpha[i] = gamma[i]/(delta - beta[i] * gamma[i] / alpha[iprev]);
        stop_timer(&elapses[k]);
        gamma[iprev] = delta = (double) 0;
        //TODO Implement p-orthogonality check
        #if 0
        BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
        orth = FP_ABS(orth);
        if (isgreater(orth, porth * orth_fac)){
            fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
            break;
        }
        #endif
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        residuals[k] = sr2norm;
        if ( isless(sr2norm, prec) ) {
            fprintf(stderr, "Precision reached\n");
            break;
        }
        // fprintf(stdout, "%d %E\n", k, residuals[k]);
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg3_bp.log", k, residuals, elapses);
    free(alpha);
    free(beta);
    free(gamma);
    free(residuals);
    free(elapses);
    return 0;
}
/* Pipelined PCG */
/*
 * CG_ALG4_BP: pipelined preconditioned CG (Ghysels/Vanroose style),
 * block-parallel version.  The preconditioner solve (m = M^-1 w) and the
 * SpMV (n = A*m) are overlapped with the dot-product reductions, and all
 * eight vector recurrences are fused into one task per bm-sized block.
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the fused axpy tasks
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x
 *  orth_fac    factor for the p-orthogonality check (disabled, see #if 0)
 *  cglog_level when non-zero, dump residual/timing log to cg_alg4_bp.log
 *  pool        caller-provided workspace of at least 10*2*n doubles
 *
 * The preconditioner factors S, N are file/global scope.  Returns 0.
 */
int CG_ALG4_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 10 double-buffered vectors out of the caller's pool (10*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
    offs += 2 * n;
    double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
    offs += 2 * n;
    double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
    offs += 2 * n;
    double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
    offs += 2 * n;
    double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
    offs += 2 * n;
    double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
    offs += 2 * n;
    double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
    double *alpha = calloc(2, sizeof(double));
    double *beta = calloc(2, sizeof(double));
    double *gamma = calloc(2, sizeof(double));
    double delta = (double) 0;
    double orth;                 /* only used by the disabled orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    /* r[0] = b */
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
    /* w[0] = A * u[0] */
    hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
    int k;
    for ( k = 0; k < cgit; k++ ) {
        start_timer();
        int iprev = i;
        i = i ^ 0x1;
        /* m[i] = M^-1 * w[i] */
        dcholsolv2_nested(1, bm, n, S, N, w[iprev], m[i]);
        /* n[i] = A * m[i] */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
        /*
         * gamma[i] = <r[i], u[i]>
         * delta = <w[i], u[i]>
         */
        cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
        #pragma omp taskwait on(gamma[i], delta)
        if ( k > 0 ) {
            beta[i] = gamma[i]/gamma[iprev];
            alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
        } else {
            /* First iteration: plain steepest-descent coefficients */
            beta[i] = (double) 0;
            alpha[i] = gamma[i]/delta;
        }
        gamma[iprev] = delta = 0;
        /* Grand fuse: per-block task updating z, q, s, p, x, r, u and w */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *zp0 = &(z[iprev])[j];
            double *qp0 = &(q[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *pp0 = &(p[iprev])[j];
            double *xp0 = &(x[iprev])[j];
            double *rp0 = &(r[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *zp1 = &(z[i])[j];
            double *qp1 = &(q[i])[j];
            double *sp1 = &(s[i])[j];
            double *pp1 = &(p[i])[j];
            double *xp1 = &(x[i])[j];
            double *rp1 = &(r[i])[j];
            double *up1 = &(u[i])[j];
            double *wp1 = &(w[i])[j];
            double *mp1 = &(m[i])[j];
            double *np1 = &(n0[i])[j];
            #pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
                in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
                priority(1) label(alg4_fuse)
            {
                /* z_i = n_i + beta_i * z_i-1 */
                BLAS_cp(c, np1, 1, zp1, 1);
                BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
                /* q_i = m_i + beta_i * q_i-1 */
                BLAS_cp(c, mp1, 1, qp1, 1);
                BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
                /* s_i = w_i + beta_i * s_i-1 */
                BLAS_cp(c, wp0, 1, sp1, 1);
                BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
                /* p_i = u_i + beta_i * p_i-1 */
                BLAS_cp(c, up0, 1, pp1, 1);
                BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
                /* x_i+1 = x_i + alpha_i * p_i */
                BLAS_cp(c, xp0, 1, xp1, 1);
                BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
                /* r_i+1 = r_i - alpha_i * s_i */
                BLAS_cp(c, rp0, 1, rp1, 1);
                BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
                /* u_i+1 = u_i - alpha_i * q_i */
                BLAS_cp(c, up0, 1, up1, 1);
                BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
                /* w_i+1 = w_i - alpha_i * z_i */
                BLAS_cp(c, wp0, 1, wp1, 1);
                BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
            }
        }
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        #pragma omp taskwait
        stop_timer(&elapses[k]);
        //TODO Implement p-orthogonality check
        #if 0
        BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
        orth = FP_ABS(orth);
        if (isgreater(orth, porth * orth_fac)){
            fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
            break;
        }
        #endif
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        residuals[k] = sr2norm;
        if ( isless(sr2norm, prec) ) {
            fprintf(stderr, "Precision reached\n");
            break;
        }
        // fprintf(stdout, "%d %E\n", k, residuals[k]);
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg4_bp.log", k, residuals, elapses);
    free(alpha);
    free(beta);
    free(gamma);
    free(residuals);
    free(elapses);
    return 0;
}
/* Gropp PCG */
/*
 * CG_ALG7_BP: Gropp's asynchronous preconditioned CG, block-parallel
 * version.  The preconditioner solve q = M^-1 s overlaps the <p,s>
 * reduction; vector recurrences are fused into two task groups per
 * bm-sized block (x/r/u, then p/s).
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the fused axpy tasks
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x
 *  orth_fac    factor for the p-orthogonality check (disabled, see #if 0)
 *  cglog_level when non-zero, dump residual/timing log to cg_alg7_bp.log
 *  pool        caller-provided workspace of at least 7*2*n doubles
 *
 * The preconditioner factors S, N are file/global scope.  Returns 0.
 */
int CG_ALG7_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 7 double-buffered vectors out of the caller's pool (7*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
    offs += 2 * n;
    double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
    offs += 2 * n;
    double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
    offs += 2 * n;
    double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
    double *alpha = calloc(2, sizeof(double));
    double *beta = calloc(2, sizeof(double));
    double *gamma = calloc(2, sizeof(double));
    double delta = (double) 0;
    double orth;                 /* only used by the disabled orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    /* r[0] = b */
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
    /* p[0] = u[0] */
    bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
    /* s[0] = A * p[0] */
    hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[i], fp_nought, s[i]);
    /* gamma[0] = <r[0], u[0]> */
    bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
    int k;
    for ( k = 0; k < cgit; k++ ) {
        start_timer();
        int iprev = i;
        i = i ^ 0x1;
        /* delta = <p[i], s[i]> */
        bblas_ddot(1, bm, 1, n, 1, p[iprev], s[iprev], &delta);
        /* q[i] = M^-1 * s[i] — overlaps with the reduction above */
        dcholsolv2_nested(1, bm, n, S, N, s[iprev], q[i]);
        #pragma omp taskwait on(delta)
        alpha[i] = gamma[iprev]/delta;
        /* Axpy fuse x,r,u */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *xp0 = &(x[iprev])[j];
            double *pp0 = &(p[iprev])[j];
            double *rp0 = &(r[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *qp0 = &(q[iprev])[j];
            double *xp1 = &(x[i])[j];
            double *pp1 = &(p[i])[j];
            double *rp1 = &(r[i])[j];
            double *sp1 = &(s[i])[j];
            double *up1 = &(u[i])[j];
            double *qp1 = &(q[i])[j];
            #pragma omp task in([c]qp1) out([c]xp1, [c]rp1, [c]up1) priority(1) label(alg7_fuse0)
            {
                /* x_i+1 = x_i + alpha_i * p_i */
                BLAS_cp(c, xp0, 1, xp1, 1);
                BLAS_axpy(c, alpha[i], pp0, 1, xp1, 1);
                /* r_i+1 = r_i - alpha_i * s_i */
                BLAS_cp(c, rp0, 1, rp1, 1);
                BLAS_axpy(c, -1*alpha[i], sp0, 1, rp1, 1);
                /* u_i+1 = u_i - alpha_i * q_i */
                BLAS_cp(c, up0, 1, up1, 1);
                BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
            }
        }
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        /* gamma[i+1] = <r[i+1], u[i+1]> */
        bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
        /* w[i+1] = A * u[i+1] */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
        #pragma omp taskwait on(gamma[i])
        beta[i] = gamma[i]/gamma[iprev];
        /* Axpy fuse p,s */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *pp0 = &(p[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *pp1 = &(p[i])[j];
            double *sp1 = &(s[i])[j];
            double *up1 = &(u[i])[j];
            double *wp1 = &(w[i])[j];
            #pragma omp task in([c]up1, [c]pp0, [c]wp1, [c]sp0) priority(1) label(alg7_fuse1)
            {
                /* p_i = u_i + beta_i * p_i-1 */
                BLAS_cp(c, up1, 1, pp1, 1);
                BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
                /* s_i = w_i + beta_i * s_i-1 */
                BLAS_cp(c, wp1, 1, sp1, 1);
                BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
            }
        }
        #pragma omp taskwait
        gamma[iprev] = delta = 0;
        stop_timer(&elapses[k]);
        //TODO Implement p-orthogonality check
        #if 0
        BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
        orth = FP_ABS(orth);
        if (isgreater(orth, porth * orth_fac)){
            fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
            break;
        }
        #endif
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        residuals[k] = sr2norm;
        if ( isless(sr2norm, prec) ) {
            fprintf(stderr, "Precision reached\n");
            break;
        }
        // fprintf(stdout, "%d %E\n", k, residuals[k]);
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg7_bp.log", k, residuals, elapses);
    free(alpha);
    free(beta);
    free(gamma);
    free(residuals);
    free(elapses);
    return 0;
}
/* IFCG BP. */
/*
 * CG_IFCG_BP: Iteration-Fusing pipelined CG, block-parallel version.
 * Identical recurrences to CG_ALG4_BP, but the convergence test and the
 * timers only synchronize every `fuse` iterations, letting consecutive
 * iterations' tasks overlap.  Residual/timing slots are indexed by kk
 * (one entry per fused group), not by k.
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the fused axpy tasks
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x
 *  fuse        number of iterations fused between convergence checks
 *  orth_fac    unused here (orthogonality check not implemented)
 *  cglog_level when non-zero, dump residual/timing log
 *  pool        caller-provided workspace of at least 10*2*n doubles
 *
 * The preconditioner factors S, N are file/global scope.  Returns 0.
 */
int CG_IFCG_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 10 double-buffered vectors out of the caller's pool (10*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
    offs += 2 * n;
    double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
    offs += 2 * n;
    double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
    offs += 2 * n;
    double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
    offs += 2 * n;
    double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
    offs += 2 * n;
    double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
    offs += 2 * n;
    double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
    double *alpha = calloc(2, sizeof(double));
    double *beta = calloc(2, sizeof(double));
    double *gamma = calloc(2, sizeof(double));
    double delta = (double) 0;
    double orth;                 /* kept for the not-yet-implemented orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    /* r[0] = b */
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
    /* w[0] = A * u[0] */
    hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
    start_timer();
    int kk = 0;       /* index into residuals/elapses: one slot per fused group */
    int k;
    for ( k = 0; k < cgit; k++) {
        int iprev = i;
        i = i ^ 0x1;
        /* m[i] = M^-1 * w[i] */
        dcholsolv2_nested(1, bm, n, S, N, w[iprev], m[i]);
        /* n[i] = A * m[i] */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
        /*
         * gamma[i] = <r[i], u[i]>
         * delta = <w[i], u[i]>
         */
        cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
        #pragma omp taskwait on(gamma[i], delta)
        if ( k > 0 ) {
            beta[i] = gamma[i]/gamma[iprev];
            alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
        } else {
            /* First iteration: plain steepest-descent coefficients */
            beta[i] = (double) 0;
            alpha[i] = gamma[i]/delta;
        }
        gamma[iprev] = delta = 0;
        /* Grand fuse: per-block task updating z, q, s, p, x, r, u and w */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *zp0 = &(z[iprev])[j];
            double *qp0 = &(q[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *pp0 = &(p[iprev])[j];
            double *xp0 = &(x[iprev])[j];
            double *rp0 = &(r[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *zp1 = &(z[i])[j];
            double *qp1 = &(q[i])[j];
            double *sp1 = &(s[i])[j];
            double *pp1 = &(p[i])[j];
            double *xp1 = &(x[i])[j];
            double *rp1 = &(r[i])[j];
            double *up1 = &(u[i])[j];
            double *wp1 = &(w[i])[j];
            double *mp1 = &(m[i])[j];
            double *np1 = &(n0[i])[j];
            #pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
                in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
                priority(1) label(alg4_fuse)
            {
                /* z_i = n_i + beta_i * z_i-1 */
                BLAS_cp(c, np1, 1, zp1, 1);
                BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
                /* q_i = m_i + beta_i * q_i-1 */
                BLAS_cp(c, mp1, 1, qp1, 1);
                BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
                /* s_i = w_i + beta_i * s_i-1 */
                BLAS_cp(c, wp0, 1, sp1, 1);
                BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
                /* p_i = u_i + beta_i * p_i-1 */
                BLAS_cp(c, up0, 1, pp1, 1);
                BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
                /* x_i+1 = x_i + alpha_i * p_i */
                BLAS_cp(c, xp0, 1, xp1, 1);
                BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
                /* r_i+1 = r_i - alpha_i * s_i */
                BLAS_cp(c, rp0, 1, rp1, 1);
                BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
                /* u_i+1 = u_i - alpha_i * q_i */
                BLAS_cp(c, up0, 1, up1, 1);
                BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
                /* w_i+1 = w_i - alpha_i * z_i */
                BLAS_cp(c, wp0, 1, wp1, 1);
                BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
            }
        }
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        /* Convergence is only tested every `fuse` iterations */
        if ( k > 0 && k % fuse == 0 ) {
            #pragma omp taskwait
            stop_timer(&elapses[kk]);
            double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
            double sr2norm = norm_r/norm_b;
            residuals[kk] = sr2norm;
            if ( isless(sr2norm, prec) ) {
                fprintf(stderr, "Precision reached\n");
                break;
            }
            kk += 1;
            start_timer();
        }
    }
    #pragma omp taskwait
    /* Loop exhausted without converging: record the final residual */
    if ( k == cgit ) {
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        stop_timer(&elapses[kk]);
        residuals[kk] = sr2norm;
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg4_ifcg_bp.log", kk, residuals, elapses);
    free(alpha);
    free(beta);
    free(gamma);
    free(residuals);
    free(elapses);
    return 0;
}
/* IFCG V2 BP*/
/*
 * CG_IFCG_V2_BP: Iteration-Fusing pipelined CG, variant 2.  Differs from
 * CG_IFCG_BP in that the two reductions (<r,u> and <w,u>) are split, so
 * beta and the s/p recurrences can be computed while the SpMV n = A*m and
 * the <w,u> reduction are still in flight; alpha is resolved afterwards.
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the fused axpy tasks
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x
 *  fuse        number of iterations fused between convergence checks
 *  orth_fac    unused here (orthogonality check not implemented)
 *  cglog_level when non-zero, dump residual/timing log
 *  pool        caller-provided workspace of at least 10*2*n doubles
 *
 * The preconditioner factors S, N are file/global scope.  Returns 0.
 */
int CG_IFCG_V2_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 10 double-buffered vectors out of the caller's pool (10*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
    offs += 2 * n;
    double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
    offs += 2 * n;
    double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
    offs += 2 * n;
    double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
    offs += 2 * n;
    double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
    offs += 2 * n;
    double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
    offs += 2 * n;
    double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
    double *alpha = calloc(2, sizeof(double));
    double *beta = calloc(2, sizeof(double));
    double *gamma = calloc(2, sizeof(double));
    double delta = (double) 0;
    double orth;                 /* kept for the not-yet-implemented orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    /* r[0] = b */
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
    /* w[0] = A * u[0] */
    hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
    start_timer();
    int kk = 0;       /* index into residuals/elapses: one slot per fused group */
    int k;
    for ( k = 0; k < cgit; k++) {
        int iprev = i;
        i = i ^ 0x1;
        /* m[i] = M^-1 * w[i] */
        dcholsolv2_nested(1, bm, n, S, N, w[iprev], m[i]);
        /*
         * gamma[i] = <r[i], u[i]>, delta = <w[i], u[i]>.
         * Split reductions: only gamma is awaited now so beta (and the
         * s/p updates that need only beta) can proceed early.
         */
        bblas_ddot(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i]);
        bblas_ddot(1, bm, 1, n, 1, w[iprev], u[iprev], &delta);
        #pragma omp taskwait on(gamma[i])
        if ( k > 0 ) {
            beta[i] = gamma[i]/gamma[iprev];
        } else {
            beta[i] = (double) 0;
        }
        /*
         * s_i = w_i + beta_i * s_i-1
         * p_i = u_i + beta_i * p_i-1
         */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *sp0 = &(s[iprev])[j];
            double *pp0 = &(p[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *sp1 = &(s[i])[j];
            double *pp1 = &(p[i])[j];
            #pragma omp task out([c]sp1, [c]pp1) in([c]sp0, [c]pp0, [c]up0, [c]wp0) priority(1) label(alg4_apxy)
            {
                /* s_i = w_i + beta_i * s_i-1 */
                BLAS_cp(c, wp0, 1, sp1, 1);
                BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
                /* p_i = u_i + beta_i * p_i-1 */
                BLAS_cp(c, up0, 1, pp1, 1);
                BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
            }
        }
        /* n[i] = A * m[i] — overlapped with the s/p updates above */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
        #pragma omp taskwait on(delta)
        if ( k > 0 ) {
            alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
        } else {
            alpha[i] = gamma[i]/delta;
        }
        gamma[iprev] = delta = 0;
        /* Grand fuse: z, q, x, r, u, w (s and p were already updated above) */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *zp0 = &(z[iprev])[j];
            double *qp0 = &(q[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *pp0 = &(p[iprev])[j];
            double *xp0 = &(x[iprev])[j];
            double *rp0 = &(r[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *zp1 = &(z[i])[j];
            double *qp1 = &(q[i])[j];
            double *sp1 = &(s[i])[j];
            double *pp1 = &(p[i])[j];
            double *xp1 = &(x[i])[j];
            double *rp1 = &(r[i])[j];
            double *up1 = &(u[i])[j];
            double *wp1 = &(w[i])[j];
            double *mp1 = &(m[i])[j];
            double *np1 = &(n0[i])[j];
            #pragma omp task out([c]zp1, [c]qp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
                in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
                priority(1) label(alg4_fuse)
            {
                /* z_i = n_i + beta_i * z_i-1 */
                BLAS_cp(c, np1, 1, zp1, 1);
                BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
                /* q_i = m_i + beta_i * q_i-1 */
                BLAS_cp(c, mp1, 1, qp1, 1);
                BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
                /* x_i+1 = x_i + alpha_i * p_i */
                BLAS_cp(c, xp0, 1, xp1, 1);
                BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
                /* r_i+1 = r_i - alpha_i * s_i */
                BLAS_cp(c, rp0, 1, rp1, 1);
                BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
                /* u_i+1 = u_i - alpha_i * q_i */
                BLAS_cp(c, up0, 1, up1, 1);
                BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
                /* w_i+1 = w_i - alpha_i * z_i */
                BLAS_cp(c, wp0, 1, wp1, 1);
                BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
            }
        }
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        /* Convergence is only tested every `fuse` iterations */
        if ( k > 0 && k % fuse == 0 ) {
            #pragma omp taskwait
            stop_timer(&elapses[kk]);
            double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
            double sr2norm = norm_r/norm_b;
            residuals[kk] = sr2norm;
            if ( isless(sr2norm, prec) ) {
                fprintf(stderr, "Precision reached\n");
                break;
            }
            kk += 1;
            start_timer();
        }
    }
    #pragma omp taskwait
    /* Loop exhausted without converging: record the final residual */
    if ( k == cgit ) {
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        stop_timer(&elapses[kk]);
        residuals[kk] = sr2norm;
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg4_ifcg_v2_bp.log", kk, residuals, elapses);
    free(alpha);
    free(beta);
    free(gamma);
    free(residuals);
    free(elapses);
    return 0;
}
/* TODO ILU */
/*
 * CG_ALG1_ILU_BP: classic preconditioned CG with an ILU-style blocked
 * preconditioner (dcholsolv2_blk over global factors SA, NA).
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the blocked BLAS kernels
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x
 *  orth_fac    unused here (orthogonality check commented out)
 *  cglog_level when non-zero, dump residual/timing log to cg_alg1_ilu_bp.log
 *  pool        caller-provided workspace of at least 4*2*n doubles
 *
 * Returns 0.
 */
int CG_ALG1_ILU_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 4 double-buffered vectors out of the caller's pool (4*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * p_i
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
    double *s = malloc(n * sizeof(double)); // s = Ap
    double *alpha1 = calloc(2, sizeof(double));
    double *alpha2 = calloc(2, sizeof(double));
    double orth;                 /* only used by the commented-out orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    /* r[0] = b */
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
    /* p[0] = z[0] */
    bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
    /* alpha1[0] = <r[0], z[0]> */
    bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
    int k;
    for ( k = 0; k < cgit; k++ ) {
        start_timer();
        int iprev = i;
        i = i ^ 0x1;
        /* s = A * p[i] */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[iprev], fp_nought, s);
        /* alpha2[i] = <s, p[i]> */
        bblas_ddot(1, bm, 1, n, 1, s, p[iprev], &alpha2[i]);
        /* Combined x/r update: x += alpha*p, r -= alpha*s */
        bblas_dcpaxpy_comb(bm, 1, n, 1, fp_mone, &alpha1[iprev], &alpha2[i], s, p[iprev], r[iprev], x[iprev], r[i], x[i]);
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        /* u[i+1] = M^-1 * r[i+1] */
        dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
        /* alpha1[i+1] = <r, u> */
        bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &alpha1[i]);
        /* p[i+1] = u[i+1] + transpose(beta[i]) * p[i] */
        bblas_extm_daxpy(1, bm, 1, n, 1, &alpha1[i], &alpha1[iprev], p[iprev], u[i], p[i]);
        #pragma omp taskwait
        stop_timer(&elapses[k]);
        alpha1[iprev] = alpha2[iprev] = (double) 0;
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        residuals[k] = sr2norm;
        if ( isless(sr2norm, prec) ) {
            fprintf(stderr, "Precision reached\n");
            break;
        }
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg1_ilu_bp.log", k, residuals, elapses);
    free(s);                     /* was leaked in the original */
    free(alpha1);
    free(alpha2);
    free(residuals);
    free(elapses);
    return 0;
}
/* Chronopoulos PCG */
/*
 * CG_ALG3_ILU_BP: Chronopoulos/Gear preconditioned CG with an ILU-style
 * blocked preconditioner (dcholsolv2_blk over global factors SA, NA).
 * Same structure as CG_ALG3_BP, only the preconditioner solve differs.
 *
 *  A           sparse matrix (hbmat_t*); only A->m (dimension n) is read here
 *  Ahbh        blocked/hierarchical form of A consumed by hbsblas_dcsrmv
 *  solution    output: x after the final iteration (n doubles)
 *  b           right-hand side (n doubles)
 *  cgit        maximum number of iterations
 *  bm          block size used by the fused axpy tasks
 *  prec        stopping threshold on the scaled residual
 *  ACCIMP      every ACCIMP iterations recompute r = b - A*x
 *  orth_fac    factor for the p-orthogonality check (disabled, see #if 0)
 *  cglog_level when non-zero, dump residual/timing log to cg_alg3_ilu_bp.log
 *  pool        caller-provided workspace of at least 6*2*n doubles
 *
 * Returns 0.
 */
int CG_ALG3_ILU_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
    hbmat_t *Ahb = (hbmat_t*) A;
    int n = Ahb->m;
    int offs = 0;
    /* Carve 6 double-buffered vectors out of the caller's pool (6*2*n doubles). */
    double *x[2] = {&pool[offs], &pool[offs+n]};
    offs += 2 * n;
    double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha * s_i
    offs += 2 * n;
    double *u[2] = {&pool[offs], &pool[offs+n]}; // u = M^-1 * r_i+1
    offs += 2 * n;
    double *w[2] = {&pool[offs], &pool[offs+n]}; // w = A * u_i+1
    offs += 2 * n;
    double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta * p_i
    offs += 2 * n;
    double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i+1 = w_i+1 + beta * s_i
    double *alpha = calloc(2, sizeof(double));
    double *beta = calloc(2, sizeof(double));
    double *gamma = calloc(2, sizeof(double));
    double delta = (double) 0;
    double orth;                 /* only used by the disabled orthogonality check */
    double porth = DBL_MAX;
    /* NOTE(review): this is <b,b> = ||b||^2, not ||b||; the residual test below
       therefore compares ||r||/||b||^2 against prec — confirm intended. */
    double norm_b = cblas_ddot(n, b, 1, b, 1);
    double *residuals = malloc(cgit * sizeof(double));
    unsigned int *elapses = malloc(cgit * sizeof *elapses); /* was sizeof(double): type mismatch */
    int i = 0;
    bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
    /* r[0] = b - A * x[0] */
    hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
    /* u[0] = M^-1 * r[0] */
    dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
    /* w[0] = A * u[0] */
    hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
    /* gamma[0] = <r[0], u[0]> */
    bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
    /* alpha[0] = gamma[0]/<w[0], u[0]> */
    bblas_ddot(1, bm, 1, n, 1, w[i], u[i], &alpha[i]);
    #pragma omp taskwait on (gamma[i], alpha[i])
    alpha[i] = gamma[i] / alpha[i];
    int k;
    for ( k = 0; k < cgit; k++ ) {
        start_timer();
        int iprev = i;
        i = i ^ 0x1;
        /* Grand fuse: per-block task updating p, s, x and r in one pass */
        for (int j = 0; j < n; j += bm ) {
            int cs = n - j;
            int c = cs < bm ? cs : bm;
            double *pp0 = &(p[iprev])[j];
            double *up0 = &(u[iprev])[j];
            double *sp0 = &(s[iprev])[j];
            double *wp0 = &(w[iprev])[j];
            double *xp0 = &(x[iprev])[j];
            double *rp0 = &(r[iprev])[j];
            double *pp1 = &(p[i])[j];
            double *up1 = &(u[i])[j];
            double *sp1 = &(s[i])[j];
            double *wp1 = &(w[i])[j];
            double *xp1 = &(x[i])[j];
            double *rp1 = &(r[i])[j];
            #pragma omp task out([c]pp1, [c]sp1, [c]xp1, [c]rp1) priority(1) label(alg3_fuse)
            {
                /* p_i = u_i + beta_i * p_i-1 */
                BLAS_cp(c, up0, 1, pp1, 1);
                BLAS_axpy(c, beta[iprev], pp0, 1, pp1, 1);
                /* s_i = w_i + beta_i * s_i-1 */
                BLAS_cp(c, wp0, 1, sp1, 1);
                BLAS_axpy(c, beta[iprev], sp0, 1, sp1, 1);
                /* x_i+1 = x_i + alpha_i * p_i */
                BLAS_cp(c, xp0, 1, xp1, 1);
                BLAS_axpy(c, alpha[iprev], pp1, 1, xp1, 1);
                /* r_i+1 = r_i - alpha_i * s_i */
                BLAS_cp(c, rp0, 1, rp1, 1);
                BLAS_axpy(c, -1*alpha[iprev], sp1, 1, rp1, 1);
            }
        }
        /* Accuracy improvement: periodically recompute the true residual */
        if ( k > 0 && k % ACCIMP == 0 ) {
            #pragma omp taskwait
            bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
            hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
        }
        /* u[i+1] = M^-1 * r[i+1] */
        dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
        /* w[i+1] = A * u[i+1] */
        hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
        /* gamma[i] = <r,u>, delta = <w,u> in one fused reduction */
        cg_ddot2(1, bm, 1, n, 1, r[i], u[i], &gamma[i], w[i], u[i], &delta);
        #pragma omp taskwait
        beta[i] = gamma[i] / gamma[iprev];
        alpha[i] = gamma[i]/(delta - beta[i] * gamma[i] / alpha[iprev]);
        stop_timer(&elapses[k]);
        gamma[iprev] = delta = (double) 0;
        //TODO Implement p-orthogonality check
        #if 0
        BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
        orth = FP_ABS(orth);
        if (isgreater(orth, porth * orth_fac)){
            fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
            break;
        }
        #endif
        double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
        double sr2norm = norm_r/norm_b;
        residuals[k] = sr2norm;
        if ( isless(sr2norm, prec) ) {
            fprintf(stderr, "Precision reached\n");
            break;
        }
        // fprintf(stdout, "%d %E\n", k, residuals[k]);
    }
    memcpy(solution, x[i], n * sizeof(double));
    if ( cglog_level )
        dump_info("cg_alg3_ilu_bp.log", k, residuals, elapses);
    free(alpha);
    free(beta);
    free(gamma);
    free(residuals);
    free(elapses);
    return 0;
}
/*
 * Pipelined Preconditioned Conjugate Gradient (Ghysels/Vanroose "ALG4"),
 * ILU-preconditioned, block-parallel version built on OmpSs-style tasks.
 *
 * A           : hbmat_t*; only A->m (problem dimension n) is read here
 * Ahbh        : blocked/hierarchical form of A consumed by hbsblas_dcsrmv()
 * solution    : out, n doubles; receives the final iterate x
 * b           : right-hand side, n doubles
 * cgit        : maximum number of iterations
 * bm          : block size used to partition vectors into per-block tasks
 * prec        : stopping threshold compared against norm_r / norm_b
 * ACCIMP      : period (in iterations) of the residual-replacement step
 *               r = b - A*x; assumes ACCIMP > 0 (k % ACCIMP divides by zero
 *               otherwise -- TODO confirm callers guarantee this)
 * orth_fac    : only referenced by the disabled p-orthogonality check
 * cglog_level : when non-zero, residual/timing history is dumped to a log
 * pool        : caller-provided workspace of at least 10 * 2 * n doubles
 *               (ten work vectors, each double-buffered across iterations)
 *
 * Returns 0. The preconditioner factors SA/NA and the start/stop timers are
 * file-scope state defined elsewhere in this file.
 *
 * NOTE(review): norm_b is <b,b> (the *squared* 2-norm) while norm_r takes a
 * sqrt, so "sr2norm" is ||r|| / ||b||^2 -- presumably intentional since every
 * sibling CG_* variant in this file does the same; confirm before changing.
 */
int CG_ALG4_ILU_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
	hbmat_t *Ahb = (hbmat_t*) A;
	int n = Ahb->m;
	/* Carve the ten double-buffered work vectors out of the caller's pool;
	   index [i] is the current buffer, [iprev] the previous iteration's. */
	int offs = 0;
	double *x[2] = {&pool[offs], &pool[offs+n]};
	offs += 2 * n;
	double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
	offs += 2 * n;
	double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
	offs += 2 * n;
	double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
	offs += 2 * n;
	double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
	offs += 2 * n;
	double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
	offs += 2 * n;
	double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
	offs += 2 * n;
	double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
	offs += 2 * n;
	double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
	offs += 2 * n;
	double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
	/* alpha/beta/gamma are double-buffered scalars (previous and current). */
	double *alpha = calloc(2, sizeof(double));
	double *beta = calloc(2, sizeof(double));
	double *gamma = calloc(2, sizeof(double));
	double delta = (double) 0;
	double orth;              // used only by the disabled orthogonality check
	double porth = DBL_MAX;   // idem
	double norm_b = cblas_ddot(n, b, 1, b, 1);
	double *residuals = malloc(cgit * sizeof(double));
	/* FIX: was malloc(cgit * sizeof(double)) for an unsigned int array --
	   harmless over-allocation, but wrong element type and inconsistent
	   with CG_ALG4_AT below. sizeof *elapses ties the size to the type. */
	unsigned int *elapses = malloc(cgit * sizeof *elapses);
	int i = 0;
	/* r[0] = b */
	bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
	/* r[0] = b - A * x[0] */
	hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
	/* u[0] = M^-1 * r[0] */
	// dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
	dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
	/* w[0] = A * u[0] */
	hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
	int k;
	for ( k = 0; k < cgit; k++ ) {
		start_timer();
		int iprev = i;
		i = i ^ 0x1;  // flip buffer index
		/* m[i] = M^-1 * w[i] */
		// dcholsolv2_nested(1, bm, n, S, N, w[iprev], m[i]);
		dcholsolv2_blk(1, n, SA, NA, w[iprev], m[i]);
		/* n[i] = A * m[i] */
		hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
		/*
		 * gamma[i] = <r[i], u[i]>
		 * delta = <w[i], u[i]>
		 */
		cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
		#pragma omp taskwait on(gamma[i], delta)
		if ( k > 0 ) {
			beta[i] = gamma[i]/gamma[iprev];
			alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
		} else {
			/* First iteration: no previous direction to combine with. */
			beta[i] = (double) 0;
			alpha[i] = gamma[i]/delta;
		}
		gamma[iprev] = delta = 0;
		/* Grand fuse: all eight vector updates of the pipelined recurrence
		   performed block-wise in a single task per block. */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;  // last block may be short
			double *zp0 = &(z[iprev])[j];
			double *qp0 = &(q[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *xp0 = &(x[iprev])[j];
			double *rp0 = &(r[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *zp1 = &(z[i])[j];
			double *qp1 = &(q[i])[j];
			double *sp1 = &(s[i])[j];
			double *pp1 = &(p[i])[j];
			double *xp1 = &(x[i])[j];
			double *rp1 = &(r[i])[j];
			double *up1 = &(u[i])[j];
			double *wp1 = &(w[i])[j];
			double *mp1 = &(m[i])[j];
			double *np1 = &(n0[i])[j];
			#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
				in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
				priority(1) label(alg4_fuse)
			{
				/* z_i = n_i + beta_i * z_i-1 */
				BLAS_cp(c, np1, 1, zp1, 1);
				BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
				/* q_i = m_i + beta_i * q_i-1 */
				BLAS_cp(c, mp1, 1, qp1, 1);
				BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
				/* s_i = w_i + beta_i * s_i-1 */
				BLAS_cp(c, wp0, 1, sp1, 1);
				BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
				/* p_i = u_i + beta_i * p_i-1 */
				BLAS_cp(c, up0, 1, pp1, 1);
				BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
				/* x_i+1 = x_i + alpha_i * p_i */
				BLAS_cp(c, xp0, 1, xp1, 1);
				BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
				/* r_i+1 = r_i - alpha_i * s_i */
				BLAS_cp(c, rp0, 1, rp1, 1);
				BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
				/* u_i+1 = u_i - alpha_i * q_i */
				BLAS_cp(c, up0, 1, up1, 1);
				BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
				/* w_i+1 = w_i - alpha_i * z_i */
				BLAS_cp(c, wp0, 1, wp1, 1);
				BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
			}
		}
		/* Accuracy improvement: periodically recompute the true residual
		   r = b - A*x to counter round-off drift of the recurrence. */
		if ( k > 0 && k % ACCIMP == 0 ) {
			#pragma omp taskwait
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
		}
		#pragma omp taskwait
		// gamma[iprev] = delta = 0;
		stop_timer(&elapses[k]);
		//TODO Implement p-orthogonality check
		#if 0
		BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
		orth = FP_ABS(orth);
		if (isgreater(orth, porth * orth_fac)){
			fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
			break;
		}
		#endif
		double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
		double sr2norm = norm_r/norm_b;
		residuals[k] = sr2norm;
		if ( isless(sr2norm, prec) ) {
			fprintf(stderr, "Precision reached\n");
			break;
		}
		// fprintf(stdout, "%d %E\n", k, residuals[k]);
	}
	memcpy(solution, x[i], n * sizeof(double));
	if ( cglog_level )
		dump_info("cg_alg4_ilu_bp.log", k, residuals, elapses);
	free(alpha);
	free(beta);
	free(gamma);
	free(residuals);
	free(elapses);
	return 0;
}
/*
 * Gropp's asynchronous Preconditioned Conjugate Gradient ("ALG7"),
 * ILU-preconditioned, block-parallel version built on OmpSs-style tasks.
 * Overlaps the preconditioner solve q = M^-1 s with the <p,s> reduction.
 *
 * A           : hbmat_t*; only A->m (problem dimension n) is read here
 * Ahbh        : blocked/hierarchical form of A consumed by hbsblas_dcsrmv()
 * solution    : out, n doubles; receives the final iterate x
 * b           : right-hand side, n doubles
 * cgit        : maximum number of iterations
 * bm          : block size used to partition vectors into per-block tasks
 * prec        : stopping threshold compared against norm_r / norm_b
 * ACCIMP      : period of the residual-replacement step r = b - A*x
 *               (assumes ACCIMP > 0; k % ACCIMP divides by zero otherwise)
 * orth_fac    : only referenced by the disabled p-orthogonality check
 * cglog_level : when non-zero, residual/timing history is dumped to a log
 * pool        : caller-provided workspace of at least 7 * 2 * n doubles
 *               (seven work vectors, each double-buffered)
 *
 * Returns 0. SA/NA (preconditioner factors) and the timers are file-scope
 * state defined elsewhere in this file.
 *
 * NOTE(review): as in the sibling variants, norm_b is the *squared* 2-norm
 * of b, so the stopping quantity is ||r|| / ||b||^2 -- confirm intended.
 */
int CG_ALG7_ILU_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, double orth_fac, int cglog_level, double *pool)
{
	hbmat_t *Ahb = (hbmat_t*) A;
	int n = Ahb->m;
	/* Carve the seven double-buffered work vectors out of the pool. */
	int offs = 0;
	double *x[2] = {&pool[offs], &pool[offs+n]};
	offs += 2 * n;
	double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
	offs += 2 * n;
	double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
	offs += 2 * n;
	double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
	offs += 2 * n;
	double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
	offs += 2 * n;
	double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
	offs += 2 * n;
	double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
	/* Double-buffered scalars (previous and current iteration). */
	double *alpha = calloc(2, sizeof(double));
	double *beta = calloc(2, sizeof(double));
	double *gamma = calloc(2, sizeof(double));
	double delta = (double) 0;
	double orth;              // used only by the disabled orthogonality check
	double porth = DBL_MAX;   // idem
	double norm_b = cblas_ddot(n, b, 1, b, 1);
	double *residuals = malloc(cgit * sizeof(double));
	/* FIX: was malloc(cgit * sizeof(double)) for an unsigned int array --
	   wrong element type (benign over-allocation). */
	unsigned int *elapses = malloc(cgit * sizeof *elapses);
	int i = 0;
	/* r[0] = b */
	bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
	/* r[0] = b - A * x[0] */
	hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
	/* u[0] = M^-1 * r[0] */
	// dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
	dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
	/* p[0] = u[0] */
	bblas_dcopy(1, bm, 1, n, 1, u[i], p[i]);
	/* s[0] = A * p[0] */
	hbsblas_dcsrmv(1, bm, fp_one, Ahbh, p[i], fp_nought, s[i]);
	/* gamma[0] = <r[0], u[0]> */
	bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
	int k;
	for ( k = 0; k < cgit; k++ ) {
		start_timer();
		int iprev = i;
		i = i ^ 0x1;  // flip buffer index
		/* delta = <p[i], s[i]> */
		bblas_ddot(1, bm, 1, n, 1, p[iprev], s[iprev], &delta);
		/* q[i] = M^-1 * s[i] -- overlapped with the reduction above */
		// dcholsolv2_nested(1, bm, n, S, N, s[iprev], q[i]);
		dcholsolv2_blk(1, n, SA, NA, s[iprev], q[i]);
		#pragma omp taskwait on(delta)
		alpha[i] = gamma[iprev]/delta;
		/* Axpy fuse x,r,u: three vector updates per block in one task. */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;  // last block may be short
			double *xp0 = &(x[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *rp0 = &(r[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *qp0 = &(q[iprev])[j];
			double *xp1 = &(x[i])[j];
			double *pp1 = &(p[i])[j];
			double *rp1 = &(r[i])[j];
			double *sp1 = &(s[i])[j];
			double *up1 = &(u[i])[j];
			double *qp1 = &(q[i])[j];
			#pragma omp task in([c]qp1) out([c]xp1, [c]rp1, [c]up1) priority(1) label(alg7_fuse0)
			{
				/* x_i+1 = x_i + alpha_i * p_i */
				BLAS_cp(c, xp0, 1, xp1, 1);
				BLAS_axpy(c, alpha[i], pp0, 1, xp1, 1);
				/* r_i+1 = r_i - alpha_i * s_i */
				BLAS_cp(c, rp0, 1, rp1, 1);
				BLAS_axpy(c, -1*alpha[i], sp0, 1, rp1, 1);
				/* u_i+1 = u_i - alpha_i * q_i */
				BLAS_cp(c, up0, 1, up1, 1);
				BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
			}
		}
		/* Accuracy improvement: periodic true-residual recomputation. */
		if ( k > 0 && k % ACCIMP == 0 ) {
			#pragma omp taskwait
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
		}
		/* gamma[i+1] = <r[i+1], u[i+1]> */
		bblas_ddot(1, bm, 1, n, 1, r[i], u[i], &gamma[i]);
		/* w[i+1] = A * u[i+1] */
		hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
		#pragma omp taskwait on(gamma[i])
		beta[i] = gamma[i]/gamma[iprev];
		/* Axpy fuse p,s: two vector updates per block in one task. */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;
			double *pp0 = &(p[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *pp1 = &(p[i])[j];
			double *sp1 = &(s[i])[j];
			double *up1 = &(u[i])[j];
			double *wp1 = &(w[i])[j];
			#pragma omp task in([c]up1, [c]pp0, [c]wp1, [c]sp0) priority(1) label(alg7_fuse1)
			{
				/* p_i = u_i + beta_i * p_i-1 */
				BLAS_cp(c, up1, 1, pp1, 1);
				BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
				/* s_i = w_i + beta_i * s_i-1 */
				BLAS_cp(c, wp1, 1, sp1, 1);
				BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
			}
		}
		#pragma omp taskwait
		gamma[iprev] = delta = 0;
		stop_timer(&elapses[k]);
		//TODO Implement p-orthogonality check
		#if 0
		BLAS_gemm(OMPSSBLAS_TRANSP, OMPSSBLAS_NTRANSP, 1, 1, n, FP_ONE, p[i], n, s, n, FP_NOUGHT, &orth, 1);
		orth = FP_ABS(orth);
		if (isgreater(orth, porth * orth_fac)){
			fprintf(stderr, "orth fail %E %E\n", orth, porth*orth_fac);
			break;
		}
		#endif
		double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
		double sr2norm = norm_r/norm_b;
		residuals[k] = sr2norm;
		if ( isless(sr2norm, prec) ) {
			fprintf(stderr, "Precision reached\n");
			break;
		}
		// fprintf(stdout, "%d %E\n", k, residuals[k]);
	}
	memcpy(solution, x[i], n * sizeof(double));
	if ( cglog_level )
		dump_info("cg_alg7_ilu_bp.log", k, residuals, elapses);
	free(alpha);
	free(beta);
	free(gamma);
	free(residuals);
	free(elapses);
	return 0;
}
/*
 * Iteration-Fusing Conjugate Gradient (IFCG), ILU-preconditioned,
 * block-parallel version. Same recurrence as CG_ALG4_ILU_BP, but the
 * global synchronization + convergence test is only performed every
 * `fuse` iterations, letting tasks from consecutive iterations overlap.
 *
 * A           : hbmat_t*; only A->m (problem dimension n) is read here
 * Ahbh        : blocked/hierarchical form of A consumed by hbsblas_dcsrmv()
 * solution    : out, n doubles; receives the final iterate x
 * b           : right-hand side, n doubles
 * cgit        : maximum number of iterations
 * bm          : block size used to partition vectors into per-block tasks
 * prec        : stopping threshold compared against norm_r / norm_b
 * ACCIMP      : period of the residual-replacement step r = b - A*x
 *               (assumes ACCIMP > 0; k % ACCIMP divides by zero otherwise)
 * fuse        : number of iterations fused between convergence checks
 *               (assumes fuse > 0 for the same reason)
 * orth_fac    : only referenced by the disabled p-orthogonality check
 * cglog_level : when non-zero, residual/timing history is dumped to a log
 * pool        : caller-provided workspace of at least 10 * 2 * n doubles
 *
 * Returns 0. residuals/elapses are indexed by kk (one entry per check),
 * not by k. SA/NA and the timers are file-scope state defined elsewhere.
 *
 * NOTE(review): norm_b is the *squared* 2-norm of b (matches siblings).
 */
int CG_IFCG_ILU_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level, double *pool)
{
	hbmat_t *Ahb = (hbmat_t*) A;
	int n = Ahb->m;
	/* Carve the ten double-buffered work vectors out of the pool. */
	int offs = 0;
	double *x[2] = {&pool[offs], &pool[offs+n]};
	offs += 2 * n;
	double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
	offs += 2 * n;
	double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
	offs += 2 * n;
	double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
	offs += 2 * n;
	double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
	offs += 2 * n;
	double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
	offs += 2 * n;
	double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
	offs += 2 * n;
	double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
	offs += 2 * n;
	double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
	offs += 2 * n;
	double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
	/* Double-buffered scalars (previous and current iteration). */
	double *alpha = calloc(2, sizeof(double));
	double *beta = calloc(2, sizeof(double));
	double *gamma = calloc(2, sizeof(double));
	double delta = (double) 0;
	double orth;              // used only by the disabled orthogonality check
	double porth = DBL_MAX;   // idem
	double norm_b = cblas_ddot(n, b, 1, b, 1);
	double *residuals = malloc(cgit * sizeof(double));
	/* FIX: was malloc(cgit * sizeof(double)) for an unsigned int array --
	   wrong element type (benign over-allocation). */
	unsigned int *elapses = malloc(cgit * sizeof *elapses);
	int i = 0;
	/* r[0] = b */
	bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
	/* r[0] = b - A * x[0] */
	hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
	/* u[0] = M^-1 * r[0] */
	// dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
	dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
	/* w[0] = A * u[0] */
	hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
	start_timer();
	int kk = 0;   // index of the next convergence-check record
	int k;
	for ( k = 0; k < cgit; k++) {
		int iprev = i;
		i = i ^ 0x1;  // flip buffer index
		/* m[i] = M^-1 * w[i] */
		dcholsolv2_blk(1, n, SA, NA, w[iprev], m[i]);
		// dcholsolv2_nested(1, bm, n, S, N, w[iprev], m[i]);
		/* n[i] = A * m[i] */
		hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
		/*
		 * gamma[i] = <r[i], u[i]>
		 * delta = <w[i], u[i]>
		 */
		cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
		#pragma omp taskwait on(gamma[i], delta)
		if ( k > 0 ) {
			beta[i] = gamma[i]/gamma[iprev];
			alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
		} else {
			/* First iteration: no previous direction to combine with. */
			beta[i] = (double) 0;
			alpha[i] = gamma[i]/delta;
		}
		gamma[iprev] = delta = 0;
		/* Grand fuse: all eight vector updates per block in one task. */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;  // last block may be short
			double *zp0 = &(z[iprev])[j];
			double *qp0 = &(q[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *xp0 = &(x[iprev])[j];
			double *rp0 = &(r[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *zp1 = &(z[i])[j];
			double *qp1 = &(q[i])[j];
			double *sp1 = &(s[i])[j];
			double *pp1 = &(p[i])[j];
			double *xp1 = &(x[i])[j];
			double *rp1 = &(r[i])[j];
			double *up1 = &(u[i])[j];
			double *wp1 = &(w[i])[j];
			double *mp1 = &(m[i])[j];
			double *np1 = &(n0[i])[j];
			#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
				in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
				priority(1) label(alg4_fuse)
			{
				/* z_i = n_i + beta_i * z_i-1 */
				BLAS_cp(c, np1, 1, zp1, 1);
				BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
				/* q_i = m_i + beta_i * q_i-1 */
				BLAS_cp(c, mp1, 1, qp1, 1);
				BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
				/* s_i = w_i + beta_i * s_i-1 */
				BLAS_cp(c, wp0, 1, sp1, 1);
				BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
				/* p_i = u_i + beta_i * p_i-1 */
				BLAS_cp(c, up0, 1, pp1, 1);
				BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
				/* x_i+1 = x_i + alpha_i * p_i */
				BLAS_cp(c, xp0, 1, xp1, 1);
				BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
				/* r_i+1 = r_i - alpha_i * s_i */
				BLAS_cp(c, rp0, 1, rp1, 1);
				BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
				/* u_i+1 = u_i - alpha_i * q_i */
				BLAS_cp(c, up0, 1, up1, 1);
				BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
				/* w_i+1 = w_i - alpha_i * z_i */
				BLAS_cp(c, wp0, 1, wp1, 1);
				BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
			}
		}
		/* Accuracy improvement: periodic true-residual recomputation. */
		if ( k > 0 && k % ACCIMP == 0 ) {
			#pragma omp taskwait
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
		}
		/* Convergence test only every `fuse` iterations. */
		if ( k > 0 && k % fuse == 0 ) {
			#pragma omp taskwait
			stop_timer(&elapses[kk]);
			double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
			double sr2norm = norm_r/norm_b;
			residuals[kk] = sr2norm;
			if ( isless(sr2norm, prec) ) {
				fprintf(stderr, "Precision reached\n");
				break;
			}
			kk += 1;
			start_timer();
		}
	}
	#pragma omp taskwait
	/* Loop ran to completion without converging: record the final state. */
	if ( k == cgit ) {
		double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
		double sr2norm = norm_r/norm_b;
		stop_timer(&elapses[kk]);
		residuals[kk] = sr2norm;
	}
	memcpy(solution, x[i], n * sizeof(double));
	if ( cglog_level )
		dump_info("cg_alg4_ifcg_ilu_bp.log", kk, residuals, elapses);
	free(alpha);
	free(beta);
	free(gamma);
	free(residuals);
	free(elapses);
	return 0;
}
/*
 * Iteration-Fusing Conjugate Gradient, variant 2 (IFCG2), ILU-preconditioned,
 * block-parallel version. Unlike CG_IFCG_ILU_BP, the two reductions are
 * split: <r,u> is computed first so the beta-dependent updates (s, p) can
 * start while the SpMV n = A*m and the <w,u> reduction are still in flight.
 *
 * A           : hbmat_t*; only A->m (problem dimension n) is read here
 * Ahbh        : blocked/hierarchical form of A consumed by hbsblas_dcsrmv()
 * solution    : out, n doubles; receives the final iterate x
 * b           : right-hand side, n doubles
 * cgit        : maximum number of iterations
 * bm          : block size used to partition vectors into per-block tasks
 * prec        : stopping threshold compared against norm_r / norm_b
 * ACCIMP      : period of the residual-replacement step r = b - A*x
 *               (assumes ACCIMP > 0; k % ACCIMP divides by zero otherwise)
 * fuse        : number of iterations fused between convergence checks
 *               (assumes fuse > 0 for the same reason)
 * orth_fac    : only referenced by the disabled p-orthogonality check
 * cglog_level : when non-zero, residual/timing history is dumped to a log
 * pool        : caller-provided workspace of at least 10 * 2 * n doubles
 *
 * Returns 0. residuals/elapses are indexed by kk (one entry per check).
 * SA/NA and the timers are file-scope state defined elsewhere.
 *
 * NOTE(review): norm_b is the *squared* 2-norm of b (matches siblings).
 */
int CG_IFCG_V2_ILU_BP(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level, double *pool)
{
	hbmat_t *Ahb = (hbmat_t*) A;
	int n = Ahb->m;
	/* Carve the ten double-buffered work vectors out of the pool. */
	int offs = 0;
	double *x[2] = {&pool[offs], &pool[offs+n]};
	offs += 2 * n;
	double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
	offs += 2 * n;
	double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
	offs += 2 * n;
	double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
	offs += 2 * n;
	double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
	offs += 2 * n;
	double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
	offs += 2 * n;
	double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
	offs += 2 * n;
	double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
	offs += 2 * n;
	double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
	offs += 2 * n;
	double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
	/* Double-buffered scalars (previous and current iteration). */
	double *alpha = calloc(2, sizeof(double));
	double *beta = calloc(2, sizeof(double));
	double *gamma = calloc(2, sizeof(double));
	double delta = (double) 0;
	double orth;              // used only by the disabled orthogonality check
	double porth = DBL_MAX;   // idem
	double norm_b = cblas_ddot(n, b, 1, b, 1);
	double *residuals = malloc(cgit * sizeof(double));
	/* FIX: was malloc(cgit * sizeof(double)) for an unsigned int array --
	   wrong element type (benign over-allocation). */
	unsigned int *elapses = malloc(cgit * sizeof *elapses);
	int i = 0;
	/* r[0] = b */
	bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
	/* r[0] = b - A * x[0] */
	hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
	/* u[0] = M^-1 * r[0] */
	dcholsolv2_blk(1, n, SA, NA, r[i], u[i]);
	// dcholsolv2_nested(1, bm, n, S, N, r[i], u[i]);
	// bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
	/* w[0] = A * u[0] */
	hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
	start_timer();
	int kk = 0;   // index of the next convergence-check record
	int k;
	for ( k = 0; k < cgit; k++) {
		int iprev = i;
		i = i ^ 0x1;  // flip buffer index
		/* m[i] = M^-1 * w[i] */
		dcholsolv2_blk(1, n, SA, NA, w[iprev], m[i]);
		// dcholsolv2_nested(1, bm, n, S, N, w[iprev], m[i]);
		// bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
		/*
		 * gamma[i] = <r[i], u[i]>
		 */
		bblas_ddot(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i]);
		#pragma omp taskwait on(gamma[i])
		if ( k > 0 ) {
			beta[i] = gamma[i]/gamma[iprev];
			// alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
		} else {
			beta[i] = (double) 0;
			// alpha[i] = gamma[i]/delta;
		}
		// gamma[iprev] = delta = 0;
		/*
		 * Early beta-only updates, overlapped with n = A*m and <w,u>:
		 * s_i = w_i + beta_i * s_i-1
		 * p_i = u_i + beta_i * p_i-1
		 */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;  // last block may be short
			double *sp0 = &(s[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *sp1 = &(s[i])[j];
			double *pp1 = &(p[i])[j];
			#pragma omp task out([c]sp1, [c]pp1) in([c]sp0, [c]pp0, [c]up0, [c]wp0) priority(1) label(alg4_apxy)
			{
				/* s_i = w_i + beta_i * s_i-1 */
				BLAS_cp(c, wp0, 1, sp1, 1);
				BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
				/* p_i = u_i + beta_i * p_i-1 */
				BLAS_cp(c, up0, 1, pp1, 1);
				BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
			}
		}
		/* n[i] = A * m[i] */
		hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
		/* delta = <w[i], u[i]> */
		bblas_ddot(1, bm, 1, n, 1, w[iprev], u[iprev], &delta);
		// cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
		#pragma omp taskwait on(delta)
		if ( k > 0 ) {
			alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
		} else {
			/* First iteration: no previous direction to combine with. */
			alpha[i] = gamma[i]/delta;
		}
		gamma[iprev] = delta = 0;
		/* Grand fuse: remaining six vector updates per block in one task
		   (s and p were already produced above). */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;
			double *zp0 = &(z[iprev])[j];
			double *qp0 = &(q[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *xp0 = &(x[iprev])[j];
			double *rp0 = &(r[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *zp1 = &(z[i])[j];
			double *qp1 = &(q[i])[j];
			double *sp1 = &(s[i])[j];
			double *pp1 = &(p[i])[j];
			double *xp1 = &(x[i])[j];
			double *rp1 = &(r[i])[j];
			double *up1 = &(u[i])[j];
			double *wp1 = &(w[i])[j];
			double *mp1 = &(m[i])[j];
			double *np1 = &(n0[i])[j];
			#pragma omp task out([c]zp1, [c]qp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
				in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
				priority(1) label(alg4_fuse)
			{
				/* z_i = n_i + beta_i * z_i-1 */
				BLAS_cp(c, np1, 1, zp1, 1);
				BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
				/* q_i = m_i + beta_i * q_i-1 */
				BLAS_cp(c, mp1, 1, qp1, 1);
				BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
				/* x_i+1 = x_i + alpha_i * p_i */
				BLAS_cp(c, xp0, 1, xp1, 1);
				BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
				/* r_i+1 = r_i - alpha_i * s_i */
				BLAS_cp(c, rp0, 1, rp1, 1);
				BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
				/* u_i+1 = u_i - alpha_i * q_i */
				BLAS_cp(c, up0, 1, up1, 1);
				BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
				/* w_i+1 = w_i - alpha_i * z_i */
				BLAS_cp(c, wp0, 1, wp1, 1);
				BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
			}
		}
		/* Accuracy improvement: periodic true-residual recomputation. */
		if ( k > 0 && k % ACCIMP == 0 ) {
			#pragma omp taskwait
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
		}
		/* Convergence test only every `fuse` iterations. */
		if ( k > 0 && k % fuse == 0 ) {
			#pragma omp taskwait
			stop_timer(&elapses[kk]);
			double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
			double sr2norm = norm_r/norm_b;
			residuals[kk] = sr2norm;
			if ( isless(sr2norm, prec) ) {
				fprintf(stderr, "Precision reached\n");
				break;
			}
			kk += 1;
			start_timer();
		}
	}
	#pragma omp taskwait
	/* Loop ran to completion without converging: record the final state. */
	if ( k == cgit ) {
		double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
		double sr2norm = norm_r/norm_b;
		stop_timer(&elapses[kk]);
		residuals[kk] = sr2norm;
	}
	memcpy(solution, x[i], n * sizeof(double));
	if ( cglog_level )
		dump_info("cg_alg4_ifcg_v2_ilu_bp.log", kk, residuals, elapses);
	free(alpha);
	free(beta);
	free(gamma);
	free(residuals);
	free(elapses);
	return 0;
}
/*
 * Pipelined PCG, V4. Same pipelined recurrence as CG_ALG4_ILU_BP but:
 * allocates its own workspace pool, uses the bsblas_dcholsolv2()
 * preconditioner (file-scope factors S/N), the commutative-reduction dot
 * product cg_ddot2_commutative(), and the fused convergence check of the
 * IFCG variants (tested every `fuse` iterations).
 *
 * A           : hbmat_t*; only A->m (problem dimension n) is read here
 * Ahbh        : blocked/hierarchical form of A consumed by hbsblas_dcsrmv()
 * solution    : out, n doubles; receives the final iterate x
 * b           : right-hand side, n doubles
 * cgit        : maximum number of iterations
 * bm          : block size used to partition vectors into per-block tasks
 * prec        : stopping threshold compared against norm_r / norm_b
 * ACCIMP      : period of the residual-replacement step r = b - A*x
 *               (assumes ACCIMP > 0; k % ACCIMP divides by zero otherwise)
 * fuse        : number of iterations fused between convergence checks
 *               (assumes fuse > 0 for the same reason)
 * orth_fac    : only referenced by the disabled p-orthogonality check
 * cglog_level : when non-zero, residual/timing history is dumped to a log
 *
 * Returns 0.
 *
 * NOTE(review): the comment said "V3" and the log file is "cg_alg4_v3.log"
 * while the function is named V4 -- the filename is runtime behavior and is
 * left unchanged; confirm which name is intended.
 * NOTE(review): norm_b is the *squared* 2-norm of b (matches siblings).
 */
int CG_ALG4_V4(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
	hbmat_t *Ahb = (hbmat_t*) A;
	int n = Ahb->m;
	/* Locally-owned workspace: ten vectors, each double-buffered. */
	int offs = 0;
	double *pool = calloc(10 * 2 * n, sizeof(double));
	double *x[2] = {&pool[offs], &pool[offs+n]};
	offs += 2 * n;
	double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
	offs += 2 * n;
	double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
	offs += 2 * n;
	double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
	offs += 2 * n;
	double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
	offs += 2 * n;
	double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
	offs += 2 * n;
	double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
	offs += 2 * n;
	double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
	offs += 2 * n;
	double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
	offs += 2 * n;
	double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
	/* Double-buffered scalars (previous and current iteration). */
	double *alpha = calloc(2, sizeof(double));
	double *beta = calloc(2, sizeof(double));
	double *gamma = calloc(2, sizeof(double));
	double delta = (double) 0;
	double orth;              // used only by the disabled orthogonality check
	double porth = DBL_MAX;   // idem
	double norm_b = cblas_ddot(n, b, 1, b, 1);
	double *residuals = malloc(cgit * sizeof(double));
	/* FIX: was malloc(cgit * sizeof(double)) for an unsigned int array --
	   wrong element type (benign over-allocation). */
	unsigned int *elapses = malloc(cgit * sizeof *elapses);
	int i = 0;
	/* r[0] = b */
	bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
	/* r[0] = b - A * x[0] */
	hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
	/* u[0] = M^-1 * r[0] */
	bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
	/* w[0] = A * u[0] */
	hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
	start_timer();
	int kk = 0;   // index of the next convergence-check record
	int k;
	for ( k = 0; k < cgit; k++) {
		int iprev = i;
		i = i ^ 0x1;  // flip buffer index
		/* m[i] = M^-1 * w[i] */
		bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
		/* n[i] = A * m[i] */
		hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
		/*
		 * gamma[i] = <r[i], u[i]>
		 * delta = <w[i], u[i]>
		 */
		cg_ddot2_commutative(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
		#pragma omp taskwait on(gamma[i], delta)
		if ( k > 0 ) {
			beta[i] = gamma[i]/gamma[iprev];
			alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
		} else {
			/* First iteration: no previous direction to combine with. */
			beta[i] = (double) 0;
			alpha[i] = gamma[i]/delta;
		}
		gamma[iprev] = delta = 0;
		/* Grand fuse: all eight vector updates per block in one task. */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;  // last block may be short
			double *zp0 = &(z[iprev])[j];
			double *qp0 = &(q[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *xp0 = &(x[iprev])[j];
			double *rp0 = &(r[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *zp1 = &(z[i])[j];
			double *qp1 = &(q[i])[j];
			double *sp1 = &(s[i])[j];
			double *pp1 = &(p[i])[j];
			double *xp1 = &(x[i])[j];
			double *rp1 = &(r[i])[j];
			double *up1 = &(u[i])[j];
			double *wp1 = &(w[i])[j];
			double *mp1 = &(m[i])[j];
			double *np1 = &(n0[i])[j];
			#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
				in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
				priority(1) label(alg4_fuse)
			{
				/* z_i = n_i + beta_i * z_i-1 */
				BLAS_cp(c, np1, 1, zp1, 1);
				BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
				/* q_i = m_i + beta_i * q_i-1 */
				BLAS_cp(c, mp1, 1, qp1, 1);
				BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
				/* s_i = w_i + beta_i * s_i-1 */
				BLAS_cp(c, wp0, 1, sp1, 1);
				BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
				/* p_i = u_i + beta_i * p_i-1 */
				BLAS_cp(c, up0, 1, pp1, 1);
				BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
				/* x_i+1 = x_i + alpha_i * p_i */
				BLAS_cp(c, xp0, 1, xp1, 1);
				BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
				/* r_i+1 = r_i - alpha_i * s_i */
				BLAS_cp(c, rp0, 1, rp1, 1);
				BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
				/* u_i+1 = u_i - alpha_i * q_i */
				BLAS_cp(c, up0, 1, up1, 1);
				BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
				/* w_i+1 = w_i - alpha_i * z_i */
				BLAS_cp(c, wp0, 1, wp1, 1);
				BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
			}
		}
		/* Accuracy improvement: periodic true-residual recomputation. */
		if ( k > 0 && k % ACCIMP == 0 ) {
			#pragma omp taskwait
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
		}
		/* Convergence test only every `fuse` iterations. */
		if ( k > 0 && k % fuse == 0 ) {
			#pragma omp taskwait
			stop_timer(&elapses[kk]);
			double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
			double sr2norm = norm_r/norm_b;
			residuals[kk] = sr2norm;
			if ( isless(sr2norm, prec) ) {
				fprintf(stderr, "Precision reached\n");
				break;
			}
			kk += 1;
			start_timer();
		}
	}
	#pragma omp taskwait
	/* Loop ran to completion without converging: record the final state. */
	if ( k == cgit ) {
		double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
		double sr2norm = norm_r/norm_b;
		stop_timer(&elapses[kk]);
		residuals[kk] = sr2norm;
	}
	memcpy(solution, x[i], n * sizeof(double));
	if ( cglog_level )
		dump_info("cg_alg4_v3.log", kk, residuals, elapses);
	free(pool);
	free(alpha);
	free(beta);
	free(gamma);
	free(residuals);
	free(elapses);
	return 0;
}
#define STRIDE 10  // step by which the fused-iteration count is grown/shrunk
#define SFUSE 10   // initial fused-iteration count
/*
 * Pipelined PCG with auto-tuned iteration fusing. Runs the CG_ALG4
 * recurrence, but the number of iterations fused between global
 * synchronizations (FUSE) is tuned on-line: while `trial` is on, FUSE grows
 * by STRIDE after each checkpoint whose elapsed time did not increase and
 * whose residual decreased; the first regression steps FUSE back and turns
 * tuning off.
 *
 * A           : hbmat_t*; only A->m (problem dimension n) is read here
 * Ahbh        : blocked/hierarchical form of A consumed by hbsblas_dcsrmv()
 * solution    : out, n doubles; receives the final iterate x
 * b           : right-hand side, n doubles
 * cgit        : maximum number of iterations
 * bm          : block size used to partition vectors into per-block tasks
 * prec        : stopping threshold compared against norm_r / norm_b
 * ACCIMP      : unused here -- the periodic accuracy-improvement branch is
 *               #if 0'd out; residual replacement happens at each HALT instead
 * fuse        : unused -- the initial fuse count is SFUSE
 * orth_fac    : only referenced by the disabled p-orthogonality check
 * cglog_level : when non-zero, history is dumped via dump_info4()
 *
 * Returns 0.
 * NOTE(review): norm_b is the *squared* 2-norm of b (matches siblings).
 */
int CG_ALG4_AT(void *A, void *Ahbh, double *solution, double *b, int cgit, int bm, double prec, int ACCIMP, int fuse, double orth_fac, int cglog_level)
{
	hbmat_t *Ahb = (hbmat_t*) A;
	int n = Ahb->m;
	int FUSE = SFUSE;  // current fused-iteration count
	int HALT = FUSE;   // next iteration index at which to synchronize
	int trial = 1;     // 1 while auto-tuning is still active
	/* Locally-owned workspace: ten vectors, each double-buffered. */
	int offs = 0;
	double *pool = calloc(10 * 2 * n, sizeof(double));
	double *x[2] = {&pool[offs], &pool[offs+n]};
	offs += 2 * n;
	double *u[2] = {&pool[offs], &pool[offs+n]}; // u_i+1 = u_i - alpha_i * q_i
	offs += 2 * n;
	double *w[2] = {&pool[offs], &pool[offs+n]}; // w_i+1 = w_i - alpha_i * z_i
	offs += 2 * n;
	double *p[2] = {&pool[offs], &pool[offs+n]}; // p_i+1 = u_i+1 + beta_i * p_i
	offs += 2 * n;
	double *m[2] = {&pool[offs], &pool[offs+n]}; // m_i = M^-1 * w_i
	offs += 2 * n;
	double *n0[2] = {&pool[offs], &pool[offs+n]}; // n_i = A * m_i
	offs += 2 * n;
	double *z[2] = {&pool[offs], &pool[offs+n]}; // z_i = n_i + beta_i * z_i-1
	offs += 2 * n;
	double *q[2] = {&pool[offs], &pool[offs+n]}; // q_i = m_i + beta_i * q_i-1
	offs += 2 * n;
	double *r[2] = {&pool[offs], &pool[offs+n]}; // r_i+1 = r_i - alpha_i * s_i
	offs += 2 * n;
	double *s[2] = {&pool[offs], &pool[offs+n]}; // s_i = w_i + beta_i * s_i-1
	/* Double-buffered scalars (previous and current iteration). */
	double *alpha = calloc(2, sizeof(double));
	double *beta = calloc(2, sizeof(double));
	double *gamma = calloc(2, sizeof(double));
	double delta = (double) 0;
	double orth;              // used only by the disabled orthogonality check
	double porth = DBL_MAX;   // idem
	double norm_b = cblas_ddot(n, b, 1, b, 1);
	double *residuals = malloc(cgit * sizeof(double));
	unsigned int *elapses = malloc(cgit * sizeof(unsigned int));
	int *fuses = calloc(cgit, sizeof(int));  // FUSE value at each checkpoint
	int *iters = calloc(cgit, sizeof(int));  // iteration index at each checkpoint
	int i = 0;
	/* r[0] = b */
	bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
	/* r[0] = b - A * x[0] */
	hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
	/* u[0] = M^-1 * r[0] */
	bsblas_dcholsolv2(1, bm, n, S, N, r[i], u[i]);
	/* w[0] = A * u[0] */
	hbsblas_dcsrmv(1, bm, fp_one, Ahbh, u[i], fp_nought, w[i]);
	start_timer();
	int kk = 0;   // index of the next checkpoint record
	int k;
	for ( k = 0; k < cgit; k++) {
		int iprev = i;
		i = i ^ 0x1;  // flip buffer index
		/* m[i] = M^-1 * w[i] */
		bsblas_dcholsolv2(1, bm, n, S, N, w[iprev], m[i]);
		/* n[i] = A * m[i] */
		hbsblas_dcsrmv(1, bm, fp_one, Ahbh, m[i], fp_nought, n0[i]);
		/*
		 * gamma[i] = <r[i], u[i]>
		 * delta = <w[i], u[i]>
		 */
		cg_ddot2(1, bm, 1, n, 1, r[iprev], u[iprev], &gamma[i], w[iprev], u[iprev], &delta);
		#pragma omp taskwait on(gamma[i], delta)
		if ( k > 0 ) {
			beta[i] = gamma[i]/gamma[iprev];
			alpha[i] = gamma[i] / (delta - beta[i] * gamma[i] / alpha[iprev]);
		} else {
			/* First iteration: no previous direction to combine with. */
			beta[i] = (double) 0;
			alpha[i] = gamma[i]/delta;
		}
		gamma[iprev] = delta = 0;
		/* Grand fuse: all eight vector updates per block in one task. */
		for (int j = 0; j < n; j += bm ) {
			int cs = n - j;
			int c = cs < bm ? cs : bm;  // last block may be short
			double *zp0 = &(z[iprev])[j];
			double *qp0 = &(q[iprev])[j];
			double *sp0 = &(s[iprev])[j];
			double *pp0 = &(p[iprev])[j];
			double *xp0 = &(x[iprev])[j];
			double *rp0 = &(r[iprev])[j];
			double *up0 = &(u[iprev])[j];
			double *wp0 = &(w[iprev])[j];
			double *zp1 = &(z[i])[j];
			double *qp1 = &(q[i])[j];
			double *sp1 = &(s[i])[j];
			double *pp1 = &(p[i])[j];
			double *xp1 = &(x[i])[j];
			double *rp1 = &(r[i])[j];
			double *up1 = &(u[i])[j];
			double *wp1 = &(w[i])[j];
			double *mp1 = &(m[i])[j];
			double *np1 = &(n0[i])[j];
			#pragma omp task out([c]zp1, [c]qp1, [c]sp1, [c]pp1, [c]xp1, [c]rp1, [c]up1, [c]wp1) \
				in([c]zp0, [c]qp0, [c]sp0, [c]pp0, [c]np1, [c]mp1, [c]up0, [c]xp0, [c]wp0, [c]rp0) \
				priority(1) label(alg4_fuse)
			{
				/* z_i = n_i + beta_i * z_i-1 */
				BLAS_cp(c, np1, 1, zp1, 1);
				BLAS_axpy(c, beta[i], zp0, 1, zp1, 1);
				/* q_i = m_i + beta_i * q_i-1 */
				BLAS_cp(c, mp1, 1, qp1, 1);
				BLAS_axpy(c, beta[i], qp0, 1, qp1, 1);
				/* s_i = w_i + beta_i * s_i-1 */
				BLAS_cp(c, wp0, 1, sp1, 1);
				BLAS_axpy(c, beta[i], sp0, 1, sp1, 1);
				/* p_i = u_i + beta_i * p_i-1 */
				BLAS_cp(c, up0, 1, pp1, 1);
				BLAS_axpy(c, beta[i], pp0, 1, pp1, 1);
				/* x_i+1 = x_i + alpha_i * p_i */
				BLAS_cp(c, xp0, 1, xp1, 1);
				BLAS_axpy(c, alpha[i], pp1, 1, xp1, 1);
				/* r_i+1 = r_i - alpha_i * s_i */
				BLAS_cp(c, rp0, 1, rp1, 1);
				BLAS_axpy(c, -1*alpha[i], sp1, 1, rp1, 1);
				/* u_i+1 = u_i - alpha_i * q_i */
				BLAS_cp(c, up0, 1, up1, 1);
				BLAS_axpy(c, -1*alpha[i], qp1, 1, up1, 1);
				/* w_i+1 = w_i - alpha_i * z_i */
				BLAS_cp(c, wp0, 1, wp1, 1);
				BLAS_axpy(c, -1*alpha[i], zp1, 1, wp1, 1);
			}
		}
		#if 0
		/* Accuracy improvement */
		if ( k > 0 && k % ACCIMP == 0 ) {
			#pragma omp taskwait
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
		}
		#endif
		/* Checkpoint: synchronize, refresh the true residual, test
		   convergence, and adapt FUSE while tuning is active. */
		if ( k == HALT ) {
			#pragma omp taskwait
			/* Accuracy improvement */
			bblas_dcopy(1, bm, 1, n, 1, b, r[i]);
			hbsblas_dcsrmv(1, bm, fp_mone, Ahbh, x[i], fp_one, r[i]);
			#pragma omp taskwait
			stop_timer(&elapses[kk]);
			double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
			double sr2norm = norm_r/norm_b;
			residuals[kk] = sr2norm;
			fuses[kk] = FUSE;
			iters[kk] = k;
			if ( isless(sr2norm, prec) ) {
				fprintf(stderr, "Precision reached\n");
				break;
			}
			/* Fuse auto-tuning */
			if ( trial ) {
				if ( kk == 0 ) {
					FUSE += STRIDE;  // no history yet: just grow
				} else {
					int elp_inc = ( elapses[kk] <= elapses[kk-1] );
					int res_inc = ( isless(residuals[kk], residuals[kk-1]) );
					if (elp_inc && res_inc) {
						FUSE += STRIDE;  // still improving: keep growing
					} else {
						FUSE -= STRIDE;  // regression: back off and stop tuning
						trial = 0;
						fprintf(stderr, "Trial turned off\n");
					}
				}
			}
			HALT += FUSE;
			// printf("k: %d kk: %d FUSE: %d\n", k, kk, FUSE);
			// printf("elp[%d] %d elp[%d] %d\n", kk, elapses[kk], kk-1, elapses[kk-1]);
			kk += 1;
			start_timer();
		}
	}
	#pragma omp taskwait
	/* Loop ran to completion without converging: record the final state. */
	if ( k == cgit ) {
		double norm_r = sqrt(cblas_ddot(n, r[i], 1, r[i], 1));
		double sr2norm = norm_r/norm_b;
		stop_timer(&elapses[kk]);
		residuals[kk] = sr2norm;
		fuses[kk] = FUSE;
	}
	memcpy(solution, x[i], n * sizeof(double));
	if ( cglog_level )
		dump_info4("cg_alg4_at.log", kk, residuals, elapses, fuses, iters);
	free(pool);
	free(alpha);
	free(beta);
	free(gamma);
	free(residuals);
	free(elapses);
	/* FIX: fuses and iters were allocated above but never released --
	   memory leak on every call. */
	free(fuses);
	free(iters);
	return 0;
}
|
GB_binop__le_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This dense-accum ewise3 kernel is compiled out for this operator: the
// generator only emits it for the arithmetic ops listed below, and LE is
// not among them.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with cij = (aij <= bij).
// The actual loop lives in the included template, specialized through the
// GB_* macros defined at the top of this generated file.
void GB (_Cdense_ewise3_noaccum__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix. The template call
// is compiled out (#if 0) for this operator, so when the kernel is enabled
// this function returns GrB_SUCCESS without touching C -- presumably because
// a comparator (bool output, int16 inputs) cannot serve as an accumulator
// here; generated code, do not hand-edit.
GrB_Info GB (_Cdense_accumB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. As with the matrix
// variant above, the template is compiled out (#if 0) for this operator and
// the function is effectively a successful no-op.
GrB_Info GB (_Cdense_accumb__le_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with
// cij = (aij <= djj). Cx is bool: the output type of the comparator.
GrB_Info GB (_AxD__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with
// cij = (dii <= bij). Mirror image of the column-scale kernel above.
GrB_Info GB (_DxB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, C<!M>=A+B with op LE on int16.
// When is_eWiseUnion is true, alpha/beta provide the values used where one
// of A or B has no entry; otherwise the two scalar inputs are never read.
GrB_Info GB (_AaddB__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings are allocated inside the template and released
// by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
// All work is done by the included meta template, driven by the task list
// computed by the caller.
GrB_Info GB (_AemultB_08__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for LE in this file, so only the
// unflipped branch below is compiled in; flipping was resolved by the
// caller before reaching this kernel.
GrB_Info GB (_AemultB_02__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full. M is always structural/complement-free in this method, so
// there is no Mask_comp parameter.
GrB_Info GB (_AemultB_04__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when C is bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
// The template chooses among the sub-methods using ewise_method.
GrB_Info GB (_AemultB_bitmap__le_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the binary op with the scalar bound to the first argument:
// Cx [p] = (x <= Bx [p]) for every entry present in B. Cx and Bx may
// alias; the output type (bool) is written over the int16 input in place.
GrB_Info GB (_bind1st__le_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Bx = (int16_t *) Bx_input ;
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// entries absent from the bitmap are skipped
if (!GBB (Bb, pB)) continue ;
int16_t bij = GBX (Bx, pB, false) ;
Cx [pB] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the binary op with the scalar bound to the second argument:
// Cx [p] = (Ax [p] <= y) for every entry present in A. Cx and Ax may
// alias; the bool result overwrites the int16 input in place.
GrB_Info GB (_bind2nd__le_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t y = (*((int16_t *) y_input)) ;
int16_t *Ax = (int16_t *) Ax_input ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// entries absent from the bitmap are skipped
if (!GBB (Ab, pA)) continue ;
int16_t aij = GBX (Ax, pA, false) ;
Cx [pA] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply the op with the scalar bound first,
// cij = (x <= aji). The transpose machinery is in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__le_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file (here both definitions are
// int16_t since A's type equals the op's first-input type)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply the op with the scalar bound second,
// cij = (aji <= y). GB_CAST_OP defined just above supplies the kernel body
// for GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
singleFlowConflict.c | int main() {
// int X = 0;
// NOTE(review): A, B and C are never allocated or initialized, so the
// pointer rotation below and the final B[i] = A[i] + 10 read and write
// through indeterminate pointers (undefined behavior). This reads like a
// data-race benchmark kernel, so the hazard is presumably intentional --
// confirm against the benchmark suite before "fixing".
int *A, *B, *C;
int p[20], q[20]; // unused in this kernel
int X = 0;
#pragma omp parallel
{
// every thread atomically bumps the shared counter
#pragma omp atomic
X = X +1;
// one thread rotates the three pointers; nowait lets the others run
// ahead to the explicit barrier below instead of waiting here
#pragma omp single nowait
{
C = A;
A = B;
B = C;
}
// the barrier orders the pointer rotation before the update below
#pragma omp barrier
int i = 10;
B[i] = A[i] + 10;
}
}
|
zherk.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_herk
*
* Performs one of the Hermitian rank k operations
*
* \f[ C = \alpha A \times A^H + \beta C, \f]
* or
* \f[ C = \alpha A^H \times A + \beta C, \f]
*
* where alpha and beta are real scalars, C is an n-by-n Hermitian
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaConjTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaConjTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaConjTrans, lda >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_zherk
* @sa plasma_cherk
*
******************************************************************************/
// Blocking Hermitian rank-k update: C = alpha*A*A^H + beta*C (or the
// conjugate-transposed variant), dispatching to the asynchronous tile
// routine plasma_omp_zherk inside an omp parallel/master region.
// Returns PlasmaSuccess or a negative/error code; see the Doxygen block
// above for parameter semantics.
int plasma_zherk(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
double alpha, plasma_complex64_t *pA, int lda,
double beta, plasma_complex64_t *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (k < 0) {
plasma_error("illegal value of k");
return -4;
}
// A is n-by-k for NoTrans, k-by-n for ConjTrans
int am, an;
if (trans == PlasmaNoTrans) {
am = n;
an = k;
}
else {
am = k;
an = n;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldc < imax(1, n)) {
plasma_error("illegal value of ldc");
return -10;
}
// quick return
if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_syrk(plasma, PlasmaComplexDouble, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize tile matrix descriptors.
plasma_desc_t A;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
n, n, 0, 0, n, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
// NOTE(review): the return values of plasma_sequence_init and
// plasma_request_init below are assigned to retval but never checked --
// confirm whether these calls can fail in this PLASMA version.
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
plasma_omp_zge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_zherk(uplo, trans,
alpha, A,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_herk
*
* Performs rank k update.
* Non-blocking tile version of plasma_zherk().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^H + \beta C; \f]
* - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zherk
* @sa plasma_omp_zherk
* @sa plasma_omp_cherk
* @sa plasma_omp_dherk
* @sa plasma_omp_sherk
*
******************************************************************************/
// Non-blocking tile Hermitian rank-k update; errors are reported through
// sequence->status / request->status rather than a return value. See the
// Doxygen block above for parameter semantics.
void plasma_omp_zherk(plasma_enum_t uplo, plasma_enum_t trans,
double alpha, plasma_desc_t A,
double beta, plasma_desc_t C,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
// NOTE(review): not-initialized is reported as PlasmaErrorIllegalValue
// here, while the blocking plasma_zherk returns
// PlasmaErrorNotInitialized -- confirm this asymmetry is intended.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
// NOTE(review): the NULL checks on sequence/request appear only after
// several plasma_request_fail(sequence, ...) calls above/below; if a
// caller passes NULL together with a bad argument, those earlier calls
// receive a NULL sequence -- verify plasma_request_fail tolerates that.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(C) != PlasmaSuccess) {
plasma_error("invalid C");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
// k is the inner dimension of the update, taken from A's descriptor
int k = trans == PlasmaNoTrans ? A.n : A.m;
if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return;
// Call the parallel function.
plasma_pzherk(uplo, trans,
alpha, A,
beta, C,
sequence, request);
}
|
a.9.1.c | /* { dg-do compile } */
void XAXIS ();
void YAXIS ();
void ZAXIS ();
// OpenMP example A.9: the three calls are independent, so each is placed in
// its own section and may execute on a different thread concurrently.
// This is a compile-only test case ({ dg-do compile }).
void
a9 ()
{
#pragma omp parallel sections
{
#pragma omp section
XAXIS ();
#pragma omp section
YAXIS ();
#pragma omp section
ZAXIS ();
}
}
|
black-scholes.c | /*
* Copyright (C) 2014-2015, 2018 Intel Corporation
*
* SPDX-License-Identifier: MIT
*/
#include <omp.h>
#include <mathimf.h>
#include "euro_opt.h"
#ifdef __DO_FLOAT__
# define EXP(x) expf(x)
# define LOG(x) logf(x)
# define SQRT(x) sqrtf(x)
# define ERF(x) erff(x)
# define INVSQRT(x) 1.0f/sqrtf(x)
# define QUARTER 0.25f
# define HALF 0.5f
# define TWO 2.0f
#else
# define EXP(x) exp(x)
# define LOG(x) log(x)
# define SQRT(x) sqrt(x)
# define ERF(x) erf(x)
# define INVSQRT(x) 1.0/sqrt(x)
# define QUARTER 0.25
# define HALF 0.5
# define TWO 2.0
#endif
/*
// This function computes the Black-Scholes formula.
// Input parameters:
// nopt - length of arrays
// s0 - initial price
// x - strike price
// t - maturity
//
// Implementation assumes fixed constant parameters
// r - risk-neutral rate
// sig - volatility
//
// Output arrays for call and put prices:
// vcall, vput
//
// Note: the restrict keyword here tells the compiler
// that none of the arrays overlap in memory.
*/
/*
 * Vectorized Black-Scholes pricing of nopt European options.
 * Inputs: r (risk-neutral rate), sig (volatility), s0 (spot), x (strike),
 * t (maturity). Outputs: vcall/vput (call and put prices).
 *
 * FIX: the scratch variables a, b, c, y, z, e, d1, d2, w1, w2 were declared
 * at function scope and therefore shared across all threads of the
 * "omp parallel for" (no private clause named them), which is a data race.
 * They are now declared inside the loop body, making each iteration's
 * temporaries private to the executing thread (and simd-private as well).
 */
void BlackScholesFormula_Compiler( int nopt,
tfloat r, tfloat sig, tfloat * restrict s0, tfloat * restrict x,
tfloat * restrict t, tfloat * restrict vcall, tfloat * restrict vput )
{
int i;
tfloat mr = -r;
tfloat sig_sig_two = sig * sig * TWO;
#pragma omp parallel for shared(s0, x, t, vcall, vput)
#pragma vector nontemporal (vcall, vput)
#pragma simd
for ( i = 0; i < nopt; i++ )
{
/* loop-local temporaries: private per iteration, race-free */
tfloat a = LOG( s0[i] / x[i] );
tfloat b = t[i] * mr;
tfloat z = t[i] * sig_sig_two;
tfloat c = QUARTER * z;
tfloat e = EXP ( b );
tfloat y = INVSQRT( z );
tfloat w1 = ( a - b + c ) * y;
tfloat w2 = ( a - b - c ) * y;
tfloat d1 = ERF( w1 );
tfloat d2 = ERF( w2 );
d1 = HALF + HALF*d1;
d2 = HALF + HALF*d2;
vcall[i] = s0[i]*d1 - x[i]*e*d2;
vput[i]  = vcall[i] - s0[i] + x[i]*e;
}
}
/*
 * Reference (scalar) Black-Scholes pricer: for each option i, compute the
 * standard d1/d2 terms, map them through the normal CDF expressed via erf,
 * and form the call price and (via put-call parity) the put price.
 */
void BlackScholesNaive(
int nopt, tfloat r, tfloat sig, const tfloat s0[], const tfloat x[],
const tfloat t[], tfloat vcall[], tfloat vput[] )
{
int i;
for ( i=0; i<nopt; i++ )
{
/* log-moneyness and the two discriminant terms */
tfloat ratio = LOG(s0[i]/x[i]);
tfloat vol_sqrt_t = sig*SQRT(t[i]);
tfloat d1 = ( ratio + (r + HALF*sig*sig)*t[i] ) / vol_sqrt_t;
tfloat d2 = ( ratio + (r - HALF*sig*sig)*t[i] ) / vol_sqrt_t;
/* N(d) = 0.5 + 0.5*erf(d/sqrt(2)) */
tfloat w1 = HALF + HALF * ERF(d1 / SQRT(2));
tfloat w2 = HALF + HALF * ERF(d2 / SQRT(2));
/* discounted strike */
tfloat disc = EXP(-r * t[i]) * x[i];
vcall[i] = s0[i] * w1 - disc * w2;
vput[i]  = disc * -w2 - s0[i] * -w1;
}
}
|
rotlet_direct_rsrc.c | #include "math.h"
#include "rotlet_direct.h"
#ifdef _OPENMP
#include "omp.h"
#endif
// Real-space part of an Ewald sum for the rotlet kernel: for each of the Nt
// evaluation points, accumulate the screened contribution (erfc-damped,
// cut off at opt.rc) of all N particles over the 3x3x3 nearest periodic
// image boxes, writing the 3-vector result into u (SoA layout: u[m],
// u[m+Nt], u[m+2*Nt]).
void rotlet_direct_rsrc(double* restrict u,
const double* restrict xt,
const int Nt,
const double* restrict x,
const double* restrict f,
const int N,
const ewald_opts opt)
{
// per-thread scratch: listed in the private() clause below so each
// OpenMP thread gets its own copy
double r[3];
double xm[3];
int i1, i2, i3, m, n;
const int nbox = 1;
double rc2 = opt.rc * opt.rc;
double xi = opt.xi;
#ifdef _OPENMP
#pragma omp parallel for \
private(r,xm,i1,i2,i3,m,n) \
default(shared)
#endif
for(m=0; m<Nt; m++) // for all evaluation points
{
double um[3] = {0.0, 0.0, 0.0};
xm[0] = xt[m    ];
xm[1] = xt[m+Nt  ];
xm[2] = xt[m+2*Nt];
for(n=0; n<N; n++) // for all particles
{
double xmn[3] = {xm[0]-x[n    ],
xm[1]-x[n+  N],
xm[2]-x[n+2*N]};
double f0 = f[n];
double f1 = f[n+N];
double f2 = f[n+2*N];
for(i1 = -nbox; i1<=nbox; i1++) // image boxes
for(i2 = -nbox; i2<=nbox; i2++)
for(i3 = -nbox; i3<=nbox; i3++)
{
r[0] = xmn[0]+opt.box[0]*i1;
r[1] = xmn[1]+opt.box[1]*i2;
r[2] = xmn[2]+opt.box[2]*i3;
double r2 = r[0]*r[0] + r[1]*r[1] + r[2]*r[2];
// skip self-interaction (r2 == 0) and anything beyond the cutoff
if(r2 > rc2 || r2 == 0)
continue; // skip outside rc
double rnorm = sqrt(r2);
double rxi = rnorm*xi;
// screened rotlet amplitude: erfc tail plus Gaussian correction
double A = (erfc(rxi)/rnorm +
2*xi*exp(-rxi*rxi)/sqrt(PI)
) / r2;
// u += A * (f x r): cross product of force with separation
um[0] += A*(f1*r[2] - f2*r[1]);
um[1] += A*(f2*r[0] - f0*r[2]);
um[2] += A*(f0*r[1] - f1*r[0]);
}
}
u[m     ] = um[0];
u[m+Nt  ] = um[1];
u[m+2*Nt] = um[2];
}
}
|
pi-v13.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
/* Normalize _DEBUG_ to 0 or 1.
 * FIX: the original used "#if _DEBUG_", which evaluates an undefined macro
 * to 0 (so debug could never be enabled by merely defining it) and is a
 * preprocessor error for a bare -D_DEBUG_ (empty expansion). "#ifdef"
 * tests definedness; the #undef avoids a non-identical redefinition when
 * _DEBUG_ was defined with some other value. */
#ifdef _DEBUG_
#undef _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif
// Approximate pi as the midpoint-rule integral of 4/(1+x^2) over [0,1],
// parallelized with an OpenMP reduction. Usage: pi <num_steps>.
int main(int argc, char *argv[]) {
double x, sum=0.0, pi=0.0;
#if !_DEBUG_
double start,end;
#endif
int i;
const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
if (argc < 2) {
fprintf(stderr, Usage);
exit(1);
}
int num_steps = atoi(argv[1]);
double step = 1.0/(double) num_steps;
#if !_DEBUG_
start= omp_get_wtime();
#endif
/* do computation -- using all threads */
// WARNING : correct code
// i and x are per-thread scratch; sum is combined by the reduction
#pragma omp parallel private(i,x)
{
#if _DEBUG_
int id = omp_get_thread_num();
#endif
#pragma omp for schedule(static) reduction(+:sum)
for (i=0; i < num_steps; i++) {
x = (i+0.5)*step; // midpoint of sub-interval i
sum += 4.0/(1.0+x*x);
#if _DEBUG_
printf("thread id:%d it:%d\n",id,i);
#endif
}
// At this point the aggregated values of all threads are available
// exactly one thread performs the final scaling
#pragma omp single
pi = step * sum;
}
#if !_DEBUG_
end = omp_get_wtime();
printf("Wall clock execution time = %.9f seconds\n", end-start);
#endif
/* print results */
printf("Value of pi = %12.10f\n", pi);
return EXIT_SUCCESS;
}
|
looop.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
#define TAM 64*127*5
#define ITERACOES_TESTE 100000
// Benchmark: repeat ITERACOES_TESTE times a two-thread pass in which each
// thread increments its own half of the array, then print the total
// (expected: TAM * ITERACOES_TESTE, since the halves are disjoint).
int main(){
int i,contador;
long soma;
int *vetor = calloc(TAM,sizeof(int));
if(vetor == NULL){
printf("Falha ao alocar memória");
return -1;
}
for(contador=0; contador < ITERACOES_TESTE; contador++){
#pragma omp parallel num_threads(2)
{// thread 0 handles the first half
if(omp_get_thread_num()==0){
for(int i=0; i<TAM/2; i++){
vetor[i] ++;
}
}else if(omp_get_thread_num()==1){// thread 1 handles the second half
for(int i=TAM/2; i<TAM; i++){
vetor[i] ++;
}
}
}
}
// sequential reduction of the final array
soma = 0;
for(i=0; i<TAM; i++){
soma += vetor[i];
}
printf("%ld\n", soma);
free(vetor);
return 0;
}
|
GB_binop__isgt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isgt_int16
// A.*B function (eWiseMult): GB_AemultB__isgt_int16
// A*D function (colscale): GB_AxD__isgt_int16
// D*A function (rowscale): GB_DxB__isgt_int16
// C+=B function (dense accum): GB_Cdense_accumB__isgt_int16
// C+=b function (dense accum): GB_Cdense_accumb__isgt_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int16
// C=scalar+B GB_bind1st__isgt_int16
// C=scalar+B' GB_bind1st_tran__isgt_int16
// C=A+scalar GB_bind2nd__isgt_int16
// C=A'+scalar GB_bind2nd_tran__isgt_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Compiled out for ISGT: the generator emits this dense-accum kernel only
// for the arithmetic ops listed below.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with cij = (aij > bij)
// stored as int16 (ISGT keeps the input type, unlike GT which yields bool).
GrB_Info GB_Cdense_ewise3_noaccum__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix using the ISGT
// op as accumulator; the loop lives in the included template and is driven
// by the precomputed k/p slicings.
GrB_Info GB_Cdense_accumB__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the ISGT op.
// Note the inner block returns GrB_SUCCESS itself; the trailing return is
// unreachable boilerplate emitted by the generator.
GrB_Info GB_Cdense_accumb__isgt_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with
// cij = (aij > djj) kept as int16.
GrB_Info GB_AxD__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with
// cij = (dii > bij); mirror image of the column-scale kernel above.
GrB_Info GB_DxB__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C=A+B or C<M>=A+B with op ISGT on int16. The slicing arrays
// are allocated inside the template and released by GB_FREE_ALL (defined
// just above this function).
GrB_Info GB_AaddB__isgt_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked by M), applying ISGT_INT16 to
// entries present in both A and B; the intersection logic is in
// GB_emult_template.c.  Relies on the GB_FREE_ALL macro defined just above
// this function in the generated file to release the ek_slice workspaces.
GrB_Info GB_AemultB__isgt_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// GB_DISABLE compiles this operator/type pairing out of the library
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek_slice workspaces; NULL until (possibly) allocated inside the template,
// and always freed by GB_FREE_ALL before returning
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every present entry of B, binding the scalar x
// to the first argument of the ISGT_INT16 operator.  Entries are skipped
// when the bitmap Bb marks them absent (GBB handles the Bb == NULL case).
// Cx and Bx may be aliased; anz entries are processed on nthreads threads.
GrB_Info GB_bind1st__isgt_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs/output
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t x = (*((int16_t *) x_input)) ;
const int16_t *Bx = (int16_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Bb, k))
{
const int16_t t = Bx [k] ;
Cx [k] = (x > t) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every present entry of A, binding the scalar y
// to the second argument of the ISGT_INT16 operator.  Entries are skipped
// when the bitmap Ab marks them absent (GBB handles the Ab == NULL case).
// Cx and Ax may be aliased; anz entries are processed on nthreads threads.
GrB_Info GB_bind2nd__isgt_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs/output
int16_t *Cx = (int16_t *) Cx_output ;
const int16_t *Ax = (int16_t *) Ax_input ;
const int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Ab, k))
{
const int16_t t = Ax [k] ;
Cx [k] = (t > y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP: per-entry kernel used by GB_unop_transpose.c below; applies
// z = (x > aij) with no typecast, despite the macro's name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply ISGT with the scalar x bound as the
// first argument.  Workspaces/A_slice drive the parallel transpose (see
// GB_unop_transpose.c for their exact use — not visible here).
GrB_Info GB_bind1st_tran__isgt_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-assert the default GB_ATYPE for the rest of this generated file
// (redefinition is identical here; kept for generator regularity)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP: per-entry kernel used by GB_unop_transpose.c below; applies
// z = (aij > y) with no typecast, despite the macro's name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply ISGT with the scalar y bound as the
// second argument.  Workspaces/A_slice drive the parallel transpose (see
// GB_unop_transpose.c for their exact use — not visible here).
GrB_Info GB_bind2nd_tran__isgt_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_DISABLE compiles this operator/type pairing out of the library
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.