source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
a.4.1.c | /* { dg-do run } */
#include <omp.h>
extern void abort (void);
/* Fill ipoints consecutive elements of x, starting at istart, with the
   sentinel value 123.456 (this thread's partition of the array). */
void
subdomain (float *x, int istart, int ipoints)
{
  int offset;

  for (offset = istart; offset < istart + ipoints; offset++)
    x[offset] = 123.456;
}
/* Partition the npoints-element array x evenly across the OpenMP team and
   have each thread stamp its own contiguous slice via subdomain().  The
   last thread also takes any remainder left by the integer division. */
void
sub (float *x, int npoints)
{
  int tid, nthreads, chunk, begin;
#pragma omp parallel default(shared) private(tid,nthreads,chunk,begin)
  {
    tid = omp_get_thread_num ();
    nthreads = omp_get_num_threads ();
    chunk = npoints / nthreads;    /* size of each thread's partition */
    begin = tid * chunk;           /* first index owned by this thread */
    if (tid == nthreads - 1)       /* last thread absorbs the remainder */
      chunk = npoints - begin;
    subdomain (x, begin, chunk);
  }
}
/* Driver: fill the array in parallel, then verify every element landed in
   the expected [123.45, 123.46] window; abort on any miss. */
int
main ()
{
  float array[10000];
  int idx = 0;

  sub (array, 10000);
  while (idx < 10000)
    {
      if (array[idx] < 123.45 || array[idx] > 123.46)
        abort ();
      idx++;
    }
  return 0;
}
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* One RandomInfo per thread (thread-local storage) so each row's noise
     generator can be indexed by OpenMP thread id without locking. */
  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; a NULL result means we fall
     through to the CPU implementation below. */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoTLS();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): threading appears gated on key == ~0UL (the default
     secret key), presumably so explicitly-seeded runs stay reproducible —
     confirm against the magick_number_threads macro. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /* A failure in any row poisons status; remaining rows become no-ops. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        /* Channels marked copy-only are transferred unmodified. */
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoTLS(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /* A failure in any row poisons status; remaining rows become no-ops. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First pass: average each channel with factor times the pixel's
        minimum RGB component.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /*
        Second pass: average the result with factor times the pixel's
        maximum RGB component, muting the colors toward moonlight.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CharcoalImage() simulates a charcoal drawing: edge-detect the image,
  clamp and blur the result, then normalize, negate, and grayscale it.
  Returns a new image, or NULL on failure (errors in exception).
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *charcoal,
    *edges;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Highlight the edges; drop the alpha trait so later stages operate on
    color channels only.
  */
  edges=EdgeImage(image,radius,exception);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  edges->alpha_trait=UndefinedPixelTrait;
  /*
    Clamp then blur the edge image; the edge image is no longer needed.
  */
  charcoal=(Image *) NULL;
  if (ClampImage(edges,exception) != MagickFalse)
    charcoal=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (charcoal == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize, invert, and desaturate; short-circuit on the first failure.
  */
  if ((NormalizeImage(charcoal,exception) != MagickFalse) &&
      (NegateImage(charcoal,MagickFalse,exception) != MagickFalse) &&
      (GrayscaleImage(charcoal,image->intensity,exception) != MagickFalse))
    return(charcoal);
  charcoal=DestroyImage(charcoal);
  return(charcoal);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"
/* Linear blend: blend_percentage percent of colorize, the rest of pixel. */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  /* Per-channel blend percentages, parsed from the blend geometry string. */
  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* Promote gray images to sRGB so a colored fill can be applied. */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  /* No blend string: return the (possibly colorspace-adjusted) clone. */
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse blend as rho[/sigma[/xi[/psi[/chi]]]]: rho seeds every channel;
    later values override green, blue, alpha (and black/alpha for CMYK).
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure in any row poisons status; remaining rows become no-ops. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /* Working 6x6 matrix, seeded with identity; rows map R,G,B,K,A outputs,
     columns weight R,G,B,K,A inputs plus a constant offset (column 6). */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Copy the caller's (possibly smaller or larger) kernel into the top-left
     of the identity matrix; entries outside 6x6 are consumed but ignored. */
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /* Log the effective 6x6 matrix, one row per line. */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure in any row poisons status; remaining rows become no-ops. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        h;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (h=0; h < (ssize_t) height; h++)
      {
        double
          sum;

        /* Row h of the matrix dotted with (R,G,B[,K][,A],QuantumRange);
           black and alpha terms apply only when the image has them. */
        sum=ColorMatrix[h][0]*GetPixelRed(image,p)+ColorMatrix[h][1]*
          GetPixelGreen(image,p)+ColorMatrix[h][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[h][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[h][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[h][5];
        switch (h)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  /* Radius of the effect ellipse (half the smaller canvas dimension). */
  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* center: image midpoint; scale: normalizes x/y so the effect region is
     treated as a circle even on non-square images. */
  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns*PerceptibleReciprocal((double)
      canvas_image->rows);
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows*PerceptibleReciprocal((double)
          canvas_image->columns);
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /* A failure in any row poisons status; remaining rows become no-ops. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the effect ellipse: copy the source pixel unchanged. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel.
          */
          factor=1.0;
          if (distance > 0.0)
            /* factor < 1 pulls the sample toward center (implode) for
               positive amount; negative amount pushes outward (explode). */
            factor=pow(sin(MagickPI*sqrt((double) distance)*PerceptibleReciprocal(radius)/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x*PerceptibleReciprocal(scale.x)+center.x),
            (double) (factor*delta.y*PerceptibleReciprocal(scale.y)+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"
double
alpha,
beta;
Image
*morph_image,
*morph_images;
MagickBooleanType
status;
MagickOffsetType
scene;
const Image
*next;
ssize_t
n;
ssize_t
y;
/*
Clone first frame in sequence.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
morph_images=CloneImage(image,0,0,MagickTrue,exception);
if (morph_images == (Image *) NULL)
return((Image *) NULL);
if (GetNextImageInList(image) == (Image *) NULL)
{
/*
Morph single image.
*/
for (n=1; n < (ssize_t) number_frames; n++)
{
morph_image=CloneImage(image,0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
number_frames);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(GetFirstImageInList(morph_images));
}
/*
Morph image sequence.
*/
status=MagickTrue;
scene=0;
next=image;
for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
{
for (n=0; n < (ssize_t) number_frames; n++)
{
CacheView
*image_view,
*morph_view;
beta=(double) (n+1.0)/(double) (number_frames+1.0);
alpha=1.0-beta;
morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
GetNextImageInList(next)->rows+0.5),next->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
status=SetImageStorageClass(morph_image,DirectClass,exception);
if (status == MagickFalse)
{
morph_image=DestroyImage(morph_image);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
morph_images->rows,GetNextImageInList(next)->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(morph_image,exception);
morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
for (y=0; y < (ssize_t) morph_images->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) morph_images->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(morph_image,i);
PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
if ((traits == UndefinedPixelTrait) ||
(morph_traits == UndefinedPixelTrait))
continue;
if ((morph_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morph_image,channel,p[i],q);
continue;
}
SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
}
p+=GetPixelChannels(morph_image);
q+=GetPixelChannels(morph_images);
}
sync=SyncCacheViewAuthenticPixels(morph_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
morph_view=DestroyCacheView(morph_view);
image_view=DestroyCacheView(image_view);
morph_image=DestroyImage(morph_image);
}
if (n < (ssize_t) number_frames)
break;
/*
Clone last frame in sequence.
*/
morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,scene,
GetImageListLength(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
scene++;
}
if (GetNextImageInList(next) != (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PlasmaPixel() returns the given pixel value perturbed by uniform random
  noise in the range [-noise/2, +noise/2) and clamped to the quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  MagickRealType
    value;

  value=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(value));
}
/*
  PlasmaImageProxy() renders plasma noise into the region bounded by
  `segment'.  While depth > 0 the region is split at its midpoint into four
  quadrants which are processed recursively; each recursion level increments
  `attenuate', shrinking the noise amplitude.  At depth 0 the left, right,
  bottom, top and middle boundary pixels are set to the average of two
  bounding pixels plus random plasma noise.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  MagickStatusType
    status;

  const Quantum
    *magick_restrict u,
    *magick_restrict v;

  Quantum
    *magick_restrict q;

  ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    A zero-area segment requires no work.
  */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
      y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        MagickStatusType accumulates the four recursive results; zero means
        at least one quadrant failed.
      */
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
  y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  The noise amplitude shrinks as the
    recursion attenuation grows.
  */
  status=MagickTrue;
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
        segment->y1-0.5)),1,1,exception);
      v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
        segment->y2-0.5)),1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      /*
        NOTE(review): this NULL-pixel guard returns MagickTrue whereas the
        right-pixel case below returns MagickFalse -- confirm the asymmetry
        is intentional.
      */
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=CastDoubleToLong(ceil(segment->x2-0.5));
          u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
            segment->y1-0.5)),1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
            segment->y2-0.5)),1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickFalse);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      /*
        NOTE(review): this inner guard mixes x1 vs x_mid with y2 vs y_mid;
        the symmetric form would test y1 -- confirm against upstream intent.
      */
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=CastDoubleToLong(ceil(segment->y2-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=CastDoubleToLong(ceil(segment->y1-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: average of the (x1,y1) and (x2,y2) corners.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      y=CastDoubleToLong(ceil(segment->y1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=CastDoubleToLong(ceil(segment->x2-0.5));
      y=CastDoubleToLong(ceil(segment->y2-0.5));
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /*
    Segments smaller than 3x3 pixels are fully resolved; report success.
  */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() initializes an image with plasma fractal values by
  delegating to PlasmaImageProxy(), which recurses over quadrants of
  `segment'.  The image must be initialized with a base color before this
  method is called.  Returns MagickTrue on success.

  Fix: assert the `exception' argument like every other exported effect in
  this module (PolaroidImage, SepiaToneImage, ShadowImage, ...) instead of
  silently dereferencing it.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Plasma is written through an authentic (writable) view while the source
    averages are read through two virtual (read-only) views of the same
    image.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%      const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PolaroidImage() simulates a Polaroid picture: the image is matted onto a
  backing colored with image->border_color, an optional caption is rendered
  beneath it, the assembly is bent with a wave, given a shadow, flopped and
  composited, then rotated by `angle' and trimmed.  Returns a new image or
  NULL on failure.
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The border is 1/25th of the largest dimension, but no less than 10
    pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /*
            Word-wrap the caption and size the caption image to the number
            of rendered lines.
          */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Matte the image (and caption, if any) onto the border-colored backing.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90 degrees, wave it, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a shadow behind the picture, then flop the shadow layer and
    composite the picture over it.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returns NULL */
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);  /* DestroyImage() returns NULL */
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Rotate to the requested angle and trim away the excess background.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SepiaToneImage() applies a sepia-tone effect: each pixel's intensity is
  mapped onto red/green/blue tone ramps offset by `threshold', then the
  result is normalized and contrast-enhanced.  Returns the toned image or
  NULL on failure.
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* cannot break out of an OpenMP loop */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      /*
        Red ramp: saturate above the threshold.
      */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      /*
        Green ramp: saturate above 7/6 of the threshold.
      */
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      /*
        Blue ramp: darken, clipping at zero below 1/6 of the threshold.
      */
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Impose a floor of threshold/7 on the green and blue channels.
        NOTE(review): these reads use the destination pixel `q' with the
        source image's channel map -- presumably equivalent because
        sepia_image is a clone of image; confirm.
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /*
    Stretch and enhance contrast for the final sepia look.
  */
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ShadowImage() simulates a shadow of `image': a transparent border is added
  around a clone, every pixel is replaced by the background color with alpha
  scaled by the pixel's own alpha and the `alpha' percentage, the alpha
  channel is Gaussian-blurred by `sigma', and the page geometry is offset by
  (x_offset,y_offset).  Returns the shadow image or NULL on failure.
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Pad the clone with a "none"-colored border wide enough for the blur
    (2*sigma on each side).
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: flatten every pixel to the background color, keeping the
    pixel's alpha scaled by the requested shadow opacity.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the shadow edge, then restore the
    prior channel mask on the blurred result.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Offset the shadow's page geometry, compensating for the border pad.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SketchImage() simulates a pencil sketch: a double-size image of uniform
  gray noise is motion-blurred along `angle', edge-detected, clamped,
  normalized, negated and halved, then color-dodged onto a clone of the
  original and blended (20x80) with the original's colors.  Returns the
  sketch image or NULL on failure.
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoTLS();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      ssize_t
        i;

      /*
        One random value per pixel, written to every defined channel, i.e.
        uniform gray noise.
      */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        /*
          NOTE(review): channel metadata is looked up on `image' while
          iterating random_image's channels -- presumably equivalent since
          random_image is a clone of image; confirm.
        */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoTLS(random_info);
  if (status == MagickFalse)
    {
      random_image=DestroyImage(random_image);
      return(random_image);  /* DestroyImage() returns NULL */
    }
  /*
    Streak the noise along the sketch angle, then extract edges.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  status=ClampImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NormalizeImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NegateImage(dodge_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  /*
    Color-dodge the pencil strokes onto a clone of the original.
  */
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  /*
    Restore a hint of the original color with a 20x80 blend.
  */
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SolarizeImage() inverts every channel value above `threshold', emulating
  darkroom solarization.  PseudoClass images are solarized through their
  colormap; DirectClass images are processed pixel-by-pixel in parallel.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
      /*
        Propagate the modified colormap back to the pixel cache.
      */
      return(SyncImage(image,exception));
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* cannot break out of an OpenMP loop */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /*
          Only channels marked for update are solarized.
        */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image.  Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  Bit i of each watermark
    pixel's intensity is written into bit-plane j of successive red, green,
    and blue samples (c cycles 0..2), starting at linear pixel index
    stegano_image->offset (k).
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap k at the total pixel count (columns*rows), not columns^2:
          the offset guard above treats valid k as [0, columns*rows), and
          columns^2 is only equal to that for square images (it truncates
          tall images and overruns wide ones).
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compose an anaglyph from a stereo pair with the
    left image registered directly over the right (zero x/y offset).
  */
  return StereoAnaglyphImage(left_image,right_image,0,0,exception);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Validate the inputs; both stereo halves must have identical geometry.
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    Quantum
      *magick_restrict r;

    /*
      The left image is sampled shifted by (x_offset,y_offset); coordinates
      outside its raster are resolved by the virtual-pixel machinery of
      GetVirtualPixels().
    */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /*
        Red comes from the left eye; green and blue from the right eye.
        When the alpha channel is marked copyable, it is the average of
        the two sources.
      */
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
    (void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
  /*
    Compute scaling factor.  scale maps pixel coordinates into a space
    where the image's inscribing ellipse becomes a circle of the given
    radius, so the swirl region covers the whole image.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          ssize_t
            i;

          /*
            Outside the swirl region: copy the source pixel unchanged,
            channel by channel.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: the rotation angle falls off quadratically
            from the full sweep at the center to zero at the radius, and
            the rotated source coordinate is resampled by interpolation.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: the blend percentages as a geometry string (rho[,sigma,xi,psi]),
% parsed by ParseGeometry(); one value per channel.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /*
    A NULL blend string means no tinting: return the clone as-is.
  */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  blend is a geometry string of
    percentages: rho applies to every channel, with sigma/xi/psi (and chi
    for CMYK alpha) overriding green/blue/alpha when present.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Convert the percentages into a signed per-channel offset vector,
    centered by subtracting the tint color's intensity.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      /*
        Apply the tint vector scaled by the midtone weighting function
        f(x)=1-4*(x-0.5)^2, which is zero at black and white and maximal
        at mid-gray; alpha passes through unchanged.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *blur,
    *mask,
    *result,
    *source;

  /*
    Soften the image edges in vignette style: render a white ellipse on a
    black canvas, blur it, and composite it over the image as an intensity
    mask before flattening.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  source=CloneImage(image,0,0,MagickTrue,exception);
  if (source == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(source,DirectClass,exception) == MagickFalse)
    {
      source=DestroyImage(source);
      return((Image *) NULL);
    }
  source->alpha_trait=BlendPixelTrait;
  mask=CloneImage(source,source->columns,source->rows,MagickTrue,exception);
  if (mask == (Image *) NULL)
    {
      source=DestroyImage(source);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,&mask->background_color,
    exception);
  (void) SetImageBackgroundColor(mask,exception);
  /*
    Draw a white, filled ellipse centered on the canvas; x and y shrink
    the ellipse radii relative to the image half-extents.
  */
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(mask,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the mask to produce the soft vignette falloff.
  */
  blur=BlurImage(mask,radius,sigma,exception);
  mask=DestroyImage(mask);
  if (blur == (Image *) NULL)
    {
      source=DestroyImage(source);
      return((Image *) NULL);
    }
  blur->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(source,blur,IntensityCompositeOp,MagickTrue,0,0,
    exception);
  blur=DestroyImage(blur);
  /*
    Flatten against the background color and restore the caller's
    colorspace.
  */
  result=MergeImageLayers(source,FlattenLayer,exception);
  source=DestroyImage(source);
  if (result != (Image *) NULL)
    (void) TransformImageColorspace(result,image->colorspace,exception);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o interpolate: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  float
    *sine_map;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /*
    The result is taller than the source by 2*|amplitude| so displaced
    rows fit without clipping.
  */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map.  sine_map[x] is the vertical displacement applied
    to column x: fabs(amplitude)+amplitude*sin(2*pi*x/wave_length).
  */
  sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (float *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
      ((2.0*MagickPI*i)*PerceptibleReciprocal(wave_length)));
  /*
    Wave image.  Out-of-range source samples resolve to the background
    color via the virtual pixel method set below.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      p only verifies the source row is readable; sampling happens through
      canvas_image_view inside InterpolatePixelChannels() below.
    */
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /*
        Each destination pixel samples the source at the same column,
        shifted up by the column's sine displacement.
      */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(float *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and highpass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if !defined(magick_restrict)
# define magick_restrict restrict  /* normally supplied by MagickCore headers */
#endif

static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  /*
    One level of the a-trous "hat" wavelet smoothing pass along a line of
    samples:

      kernel[i] = (2*x[i] + x[i-scale] + x[i+scale]) / 4

    where neighbor indices that fall outside [0,extent) are reflected back
    into range.  The line is walked with the given stride, so rows
    (stride 1) and columns (stride == row length) share one implementation.
  */
  ssize_t
    i;

  for (i=0; i < (ssize_t) scale; i++)
    kernel[i]=0.25f*(pixels[(size_t) i*stride]+pixels[(size_t) i*stride]+
      pixels[(scale-(size_t) i)*stride]+pixels[((size_t) i+scale)*stride]);
  for ( ; i < (ssize_t) (extent-scale); i++)
    kernel[i]=0.25f*(2.0f*pixels[(size_t) i*stride]+
      pixels[((size_t) i-scale)*stride]+pixels[((size_t) i+scale)*stride]);
  for ( ; i < (ssize_t) extent; i++)
    kernel[i]=0.25f*(pixels[(size_t) i*stride]+pixels[(size_t) i*stride]+
      pixels[((size_t) i-scale)*stride]+
      pixels[(2*(extent-1)-scale-(size_t) i)*stride]);
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
ssize_t
channel;
static const float
noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
0.0080f, 0.0044f };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
{
if (kernel != (float *) NULL)
kernel=(float *) RelinquishMagickMemory(kernel);
if (pixels_info != (MemoryInfo *) NULL)
pixels_info=RelinquishVirtualMemory(pixels_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
{
ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
PixelChannel
pixel_channel;
PixelTrait
traits;
if (status == MagickFalse)
continue;
traits=GetPixelChannelTraits(image,(PixelChannel) channel);
if (traits == UndefinedPixelTrait)
continue;
pixel_channel=GetPixelChannelChannel(image,channel);
if ((pixel_channel != RedPixelChannel) &&
(pixel_channel != GreenPixelChannel) &&
(pixel_channel != BluePixelChannel))
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[i++]=(float) p[channel];
p+=GetPixelChannels(image);
}
}
/*
Low pass filter outputs are called approximation kernel & high pass
filters are referred to as detail kernel. The detail kernel
have high values in the noisy parts of the signal.
*/
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x;
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
c;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,((size_t) 1UL << level),p);
q+=low_pass;
for (c=0; c < (ssize_t) image->columns; c++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
r;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,((size_t) 1UL << level),p);
for (r=0; r < (ssize_t) image->rows; r++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
To threshold, each coefficient is compared to a threshold value and
attenuated / shrunk by some factor.
*/
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
offset;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
offset=GetPixelChannelOffset(noise_image,pixel_channel);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
pixel;
pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
q[offset]=ClampToQuantum(pixel);
i++;
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
|
SpatialFractionalMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialFractionalMaxPooling.c"
#else
/* Builds the sequence of pool-window start offsets along one dimension.
   The first outputSize-1 starts follow the pseudo-random fractional
   spacing derived from `sample`; the final window is pinned so that it
   ends exactly at the input boundary.  Caller frees with THFree. */
static int64_t* THNN_(SpatialFractionalMaxPooling_generateIntervals)(
    scalar_t sample,
    int64_t inputSize,
    int64_t outputSize,
    int poolSize) {
  scalar_t alpha = (scalar_t) (inputSize - poolSize) / (scalar_t) (outputSize - 1);
  int64_t* starts = (int64_t*) THAlloc(sizeof(int64_t) * outputSize);
  /* the subtracted term is loop-invariant, so compute it once */
  int64_t base = (int64_t) (sample * alpha);
  int64_t idx;
  for (idx = 0; idx < outputSize - 1; ++idx) {
    starts[idx] = (int64_t) ((idx + sample) * alpha) - base;
  }
  /* force the last window to end at inputSize */
  starts[outputSize - 1] = inputSize - poolSize;
  return starts;
}
/* Forward worker for one frame: for each plane (in parallel), draw the
   fractional pooling intervals from the two per-plane random samples,
   then write the max value and its flattened input index for every
   output location. */
static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)(
    scalar_t* input,
    scalar_t* output,
    THIndex_t* indices,
    scalar_t* randomSamples,
    int64_t numPlanes,
    int64_t inputW, int64_t inputH,
    int64_t outputW, int64_t outputH,
    int poolSizeW, int poolSizeH) {
  int64_t plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < numPlanes; ++plane) {
    /* each plane contains 2 random samples, one for W and one for H */
    scalar_t* randomSamplesForPlane = randomSamples + plane * 2;
    /* Generate interval sequence (per-thread allocations, freed below) */
    int64_t* sequenceW =
      THNN_(SpatialFractionalMaxPooling_generateIntervals)(
        randomSamplesForPlane[0], inputW, outputW, poolSizeW);
    int64_t* sequenceH =
      THNN_(SpatialFractionalMaxPooling_generateIntervals)(
        randomSamplesForPlane[1], inputH, outputH, poolSizeH);
    /* loop over output */
    int64_t h, w;
    scalar_t* inputForPlane = input + plane * inputW * inputH;
    scalar_t* outputForPlane = output + plane * outputW * outputH;
    THIndex_t* indicesForPlane = indices + plane * outputW * outputH;
    for (h = 0; h < outputH; ++h) {
      int64_t inputHStart = sequenceH[h];
      for (w = 0; w < outputW; ++w) {
        int64_t inputWStart = sequenceW[w];
        /* scan the poolSizeH x poolSizeW window for its maximum */
        scalar_t maxVal = -THInf;
        int64_t maxIndex = -1;
        int64_t h2, w2;
        for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) {
          for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) {
            THAssert(h2 >= 0 && h2 < inputH);
            THAssert(w2 >= 0 && w2 < inputW);
            int64_t planeIndex = h2 * inputW + w2;
            scalar_t val = inputForPlane[planeIndex];
            if (val > maxVal) {
              maxVal = val;
              maxIndex = planeIndex;
            }
          }
        }
        THAssert(maxVal != -THInf);
        THAssert(maxIndex != -1);
        outputForPlane[h * outputW + w] = maxVal;
        /* +1 to lua index */
        indicesForPlane[h * outputW + w] = maxIndex + TH_INDEX_BASE;
      }
    }
    THFree(sequenceW);
    THFree(sequenceH);
  }
}
/* Forward entry point: checks shapes, resizes output/indices, and
   dispatches to the per-frame worker (directly for 3D input; per batch
   element, in parallel, for 4D input). */
void THNN_(SpatialFractionalMaxPooling_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THIndexTensor *indices,
    THTensor *randomSamples) {
  int64_t numBatch = 1;
  int planeDim = 0;
  int heightDim = 1;
  int widthDim = 2;
  int64_t numInputDims = THTensor_(nDimensionLegacyNoScalars)(input);
  THNN_ARGCHECK(!input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
                "non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s");
  if (numInputDims == 4) {
    /* batch mode: dim 0 is the batch, shift the other dims right */
    numBatch = THTensor_(size)(input, 0);
    planeDim++;
    heightDim++;
    widthDim++;
  }
  /* sizes */
  int64_t numPlanes = THTensor_(size)(input, planeDim);
  int64_t inputH = THTensor_(size)(input, heightDim);
  int64_t inputW = THTensor_(size)(input, widthDim);
  /* every pooling window must fit inside the input.
     NOTE(review): the %d specifiers receive int64_t arguments
     (inputH/inputW) — confirm THArgCheck's format handling or cast. */
  THArgCheck(outputH + poolSizeH - 1 <= inputH, 7,
             "poolSizeH (%d) too large relative to input height (%d)",
             poolSizeH, inputH);
  THArgCheck(outputW + poolSizeW - 1 <= inputW, 6,
             "poolSizeW (%d) too large relative to input width (%d)",
             poolSizeW, inputW);
  /* get contiguous input */
  input = THTensor_(newContiguous)(input);
  if (numInputDims == 3) {
    /* resize output */
    THTensor_(resize3d)(output, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THIndexTensor_(resize3d)(indices, numPlanes, outputH, outputW);
    THNN_(SpatialFractionalMaxPooling_updateOutput_frame)(
      input->data<scalar_t>(),
      output->data<scalar_t>(),
      THIndexTensor_(data)(indices),
      randomSamples->data<scalar_t>(),
      numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH);
  } else {
    THTensor_(resize4d)(output, numBatch, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THIndexTensor_(resize4d)(indices, numBatch, numPlanes, outputH, outputW);
    int64_t batch;
#pragma omp parallel for private(batch)
    for (batch = 0; batch < numBatch; ++batch) {
      THNN_(SpatialFractionalMaxPooling_updateOutput_frame)(
        input->data<scalar_t>() + batch * numPlanes * inputH * inputW,
        output->data<scalar_t>() + batch * numPlanes * outputH * outputW,
        THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW,
        randomSamples->data<scalar_t>() + batch * numPlanes * 2,
        numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH);
    }
  }
  /* cleanup: drop the reference taken by newContiguous */
  c10::raw::intrusive_ptr::decref(input);
}
/* Backward worker for one frame: scatter-add each gradOutput value back
   to the input position recorded in `indices` during the forward pass. */
static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)(
    scalar_t* gradInput,
    scalar_t* gradOutput,
    THIndex_t* indices,
    int64_t numPlanes,
    int64_t inputW, int64_t inputH,
    int64_t outputW, int64_t outputH) {
  int64_t plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < numPlanes; plane++) {
    scalar_t* giPlane = gradInput + plane * inputW * inputH;
    scalar_t* goPlane = gradOutput + plane * outputW * outputH;
    THIndex_t* idxPlane = indices + plane * outputW * outputH;
    /* walk the output positions in row-major order */
    int64_t out;
    for (out = 0; out < outputH * outputW; ++out) {
      int64_t src = idxPlane[out] - TH_INDEX_BASE;
      THAssert(src >= 0 && src < inputW * inputH);
      giPlane[src] += goPlane[out];
    }
  }
}
/* Backward entry point: validates gradOutput's spatial size, zeroes
   gradInput, then scatter-adds gradients using the indices saved by the
   forward pass (per batch element in parallel for 4D input). */
void THNN_(SpatialFractionalMaxPooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THIndexTensor *indices) {
  int64_t numBatch = 1;
  int planeDim = 0;
  int heightDim = 1;
  int widthDim = 2;
  int64_t numInputDims = THTensor_(nDimensionLegacyNoScalars)(input);
  if (numInputDims == 4) {
    /* batch mode: dim 0 is the batch, shift the other dims right */
    numBatch = THTensor_(size)(input, 0);
    planeDim = 1;
    heightDim++;
    widthDim++;
  }
  /* sizes */
  int64_t numPlanes = THTensor_(size)(input, planeDim);
  int64_t inputH = THTensor_(size)(input, heightDim);
  int64_t inputW = THTensor_(size)(input, widthDim);
  /* gradOutput must match the forward output geometry */
  THArgCheck(outputW == THTensor_(size)(gradOutput, widthDim), 3,
             "gradOutput width unexpected");
  THArgCheck(outputH == THTensor_(size)(gradOutput, heightDim), 3,
             "gradOutput height unexpected");
  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  /* backprop */
  if (numInputDims == 3) {
    THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)(
      gradInput->data<scalar_t>(),
      gradOutput->data<scalar_t>(),
      THIndexTensor_(data)(indices),
      numPlanes, inputW, inputH, outputW, outputH);
  } else {
    int64_t batch;
#pragma omp parallel for private(batch)
    for (batch = 0; batch < numBatch; ++batch) {
      THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)(
        gradInput->data<scalar_t>() + batch * numPlanes * inputH * inputW,
        gradOutput->data<scalar_t>() + batch * numPlanes * outputH * outputW,
        THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW,
        numPlanes, inputW, inputH, outputW, outputH);
    }
  }
  /* cleanup: drop the reference taken by newContiguous */
  c10::raw::intrusive_ptr::decref(gradOutput);
}
#endif
|
semtry.c | #include <stdio.h>
#include <stdlib.h>
// Simulated binary semaphore: 1 = free, 0 = held.
int mutex = 1;
// Number of occupied buffer slots (starts empty).
int full = 0;
// Number of free slots (buffer capacity is 10); x is the sequence
// number of the most recently produced item.
int empty = 10, x = 0;
// Producer: simulates wait(mutex)/signal(mutex) with plain increments
// and moves one slot from "empty" to "full".
void producer()
{
    // simulated wait(mutex)
    --mutex;
    // one more occupied slot, one fewer free slot
    ++full;
    --empty;
    // produce the next item
    ++x;
    printf("\nProducer produces item %d", x);
    // simulated signal(mutex)
    ++mutex;
}
// Consumer: simulates wait(mutex)/signal(mutex) with plain increments
// and moves one slot from "full" back to "empty".
void consumer()
{
    --mutex;    /* simulated wait(mutex) */
    --full;     /* one fewer occupied slot */
    ++empty;    /* one more free slot */
    printf("\nConsumer consumes item %d", x);
    --x;        /* item consumed */
    ++mutex;    /* simulated signal(mutex) */
}
/* Interactive menu driving the simulated producer-consumer demo.
 * Fixes:
 *  - the original loop `for (i = 1; i > 0; i++)` only terminated via
 *    signed-integer overflow, which is undefined behavior; replaced by
 *    an explicit infinite loop (exit happens through case 3).
 *  - scanf's return value is now checked, so bad or exhausted input no
 *    longer spins forever on the same failed conversion.
 *  - dropped the `#pragma omp critical` that wrapped the loop: outside
 *    a parallel region it had no effect. */
int main()
{
    int n;
    printf("\n1.Producer"
           "\n2.Consumer"
           "\n3.Exit");
    for (;;) {
        printf("\nEnter your choice:");
        if (scanf("%d", &n) != 1) {
            /* non-numeric input or EOF: bail out instead of looping */
            printf("\nInvalid input!");
            return 1;
        }
        // Switch Cases
        switch (n) {
        case 1:
            // Produce only when the lock is free and a slot is empty
            if ((mutex == 1)
                && (empty != 0)) {
                producer();
            }
            // Otherwise, print buffer is full
            else {
                printf("Buffer is full!");
            }
            break;
        case 2:
            // Consume only when the lock is free and a slot is full
            if ((mutex == 1)
                && (full != 0)) {
                consumer();
            }
            // Otherwise, print Buffer is empty
            else {
                printf("\nBuffer is empty!");
            }
            break;
        // Exit Condition
        case 3:
            exit(0);
            break;
        }
    }
    return 0;
}
|
GB_unop__identity_fp64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_uint64)
// op(A') function: GB (_unop_tran__identity_fp64_uint64)
// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator elementwise: Cx [p] = (double) Ax [p].
// Work is split across nthreads with a static OpenMP schedule; in the
// bitmap case entries with Ab [p] == 0 are skipped.  (Auto-generated
// file: behavior is defined by the GB_* macros above.)
GrB_Info GB (_unop_apply__identity_fp64_uint64)
(
    double *Cx,         // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply: C = identity ((double) A').  All of the work is
// performed by the shared template in GB_unop_transpose.c, specialized
// here through the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dft.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdint.h>
#include <getopt.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <complex.h>
#include <fftw3.h>
#include <time.h>
#include "grid.h"
/* Direct (slow) DFT imaging of visibilities onto a uv grid.
 *
 * uvgrid    - output grid of grid_size*grid_size complex values
 * grid_size - side length of the (square) grid in pixels
 * lambda    - uv grid size in wavelengths (converts pixels to l/m)
 * vis       - visibility data (baselines x times x frequencies)
 * iter      - stride in x: only every iter-th column is evaluated
 *
 * Fixes relative to the previous revision:
 *  - l and m are now doubles; they were declared `int`, so the division
 *    by lambda truncated the direction cosines to 0 for almost every
 *    pixel and collapsed the phase term.
 *  - the DFT phase is 2*pi*(u*l + v*m + w*(n-1)); the parentheses were
 *    missing, so only the first term was scaled by 2*pi.
 */
void image_dft(double complex *uvgrid, int grid_size, double lambda,
               struct vis_data *vis, int iter){
    int total_steps = grid_size;
    int steps_completed = 0;
    #pragma omp parallel for schedule(dynamic)
    for (int y = 0; y<grid_size; y+=10){
        double l = (y - grid_size / 2) / lambda;
        for (int x = 0; x<grid_size; x+=iter){
            double m = (x - grid_size / 2) / lambda;
            double real_p = 0;
            double imag_p = 0;
            for(int bl = 0; bl < vis->bl_count; ++bl){
                for(int time = 0; time < vis->bl[bl].time_count; ++time){
                    for (int freq = 0; freq < vis->bl[bl].freq_count; ++freq){
                        double complex visibility = vis->bl[bl].vis[time*vis->bl[bl].freq_count + freq];
                        /* NOTE(review): uvw is read at consecutive offsets
                           +0/+1/+2 from the sample index; confirm the uvw
                           layout in grid.h (a 3-stride layout would need
                           3*(time*freq_count+freq)+component). */
                        double subang1 = m * vis->bl[bl].uvw[time*vis->bl[bl].freq_count + freq];
                        double subang2 = l * vis->bl[bl].uvw[time*vis->bl[bl].freq_count + freq + 1];
                        double subang3 = (sqrt(1-l*l-m*m)-1) * vis->bl[bl].uvw[time*vis->bl[bl].freq_count + freq + 2];
                        double angle = 2 * M_PI * (subang1 + subang2 + subang3);
                        real_p += creal(visibility) * cos(angle) + cimag(visibility) * sin(angle);
                        imag_p += -creal(visibility) * sin(angle) + cimag(visibility) * cos(angle);
                    }
                }
            }
            uvgrid[y*grid_size + x] = real_p + imag_p * I;
        }
        #pragma omp atomic
        ++steps_completed;
        #pragma omp critical
        printf("Progress: %d/%d \r",steps_completed,total_steps);
    }
}
/* Command-line driver: parse options, load visibilities, image them with
 * the direct DFT, then optionally FFT the grid and write the real part
 * of the image to disk. */
int main(int argc, char *argv[]){
    //Structure for reporting memory usage:
    struct rusage *rusage_cp = malloc(sizeof(struct rusage));
    // Read parameters
    static struct option options[] =
      {
        {"theta", required_argument, 0, 't' },
        {"lambda", required_argument, 0, 'l' },
        {"image", optional_argument, 0, 'i' },
        {"min-bl", optional_argument, 0, 'b' },
        {"max-bl", optional_argument, 0, 'B' },
        {"iter", optional_argument, 0, 'I' },
        {0, 0, 0, 0}
      };
    int option_index = 0;
    double theta = 0, lambda = 0;
    char *image_file = NULL;
    double bl_min = DBL_MIN, bl_max = DBL_MAX;
    int c; int invalid = 0;
    long iter = 1;
    while ((c = getopt_long(argc, argv, ":", options, &option_index)) != -1) {
        switch(c) {
        case 't': theta = atof(optarg); break;
        case 'l': lambda = atof(optarg); break;
        case 'i': image_file = optarg; break;
        case 'b': bl_min = atof(optarg); break;
        case 'B': bl_max = atof(optarg); break;
        case 'I': iter = atol(optarg); break;
        default: invalid = 1; break;
        }
    }
    // Check grid parameters
    // grid side length in pixels = field of view * grid size in wavelengths
    int grid_size = (int)(theta * lambda);
    size_t grid_byte_size = grid_size * grid_size * sizeof(double complex);
    if (grid_size <= 0) {
        fprintf(stderr, "Invalid grid configuration!\n");
        invalid = 1;
    }
    // Must have an input file
    const char *vis_file = 0;
    if (optind + 1 == argc) {
        vis_file = argv[optind];
    } else {
        printf("Please supply a visibility input file!\n");
        invalid = 1;
    }
    if (invalid) {
        printf("usage: %s --theta=THETA --lambda=LAM [--image=IMAGE]\n", argv[0]);
        printf("          [--min-bl=MIN_BL] [--max-bl=MAX_BL]\n");
        printf("          INPUT\n");
        printf("\n");
        printf("optional arguments:\n");
        printf("  --theta=THETA     Field of view size (in radians)\n");
        printf("  --lambda=LAM      uv grid size (in wavelenghts)\n");
        printf("  --image=IMAGE     image output file\n");
        printf("  --min-bl=MIN_BL   Minimum baseline length to consider (in km)\n");
        printf("  --max-bl=MAX_BL   Maximum baseline length to consider (in km)\n");
        printf("  --iter=ITER       Samples every +=ITER point in fourier space. Quickens DFT.\n");
        printf("positional arguments:\n");
        printf("  input             input visibilities\n");
        return 1;
    }
    // Intialise HDF5
    init_dtype_cpx();
    // Open files
    struct vis_data vis;
    int grid_fd = -1, image_fd = -1;
    if (load_vis(vis_file, &vis, bl_min, bl_max)) {
        return 1;
    }
    if (image_file) {
        image_fd = open(image_file, O_CREAT | O_TRUNC | O_WRONLY, 0666);
        if (image_fd == -1) {
            perror("Failed to open image file");
            return 1;
        }
    }
    // Allocate grid
    // NOTE(review): calloc's result is not checked; a huge theta*lambda
    // would crash on the first write.
    printf("\nGrid size: %d x %d (%.2f GB)\n", grid_size, grid_size, (double)(grid_byte_size)/1000000000);
    double complex *uvgrid = (double complex *)calloc(grid_byte_size, 1);
    // Simple uniform weight (we re-use the grid to save an allocation)
    // The memset clears exactly the unsigned-int prefix that weight() wrote.
    printf("Weighting...\n");
    weight((unsigned int *)uvgrid, grid_size, theta, &vis);
    memset(uvgrid, 0, grid_size * grid_size * sizeof(unsigned int));
    // Set up performance counters
    struct perf_counters counters;
    open_perf_counters(&counters);
    // Start timer
    struct timespec start_time;
    clock_gettime(CLOCK_REALTIME, &start_time);
    uint64_t flops = 0, mem = 0;
    printf("Direct DFT...(this takes a LONG time)\n");
    if(iter>1) printf("Sampling every %ld points in fourier space (saves time).\n",iter);
    // DFT HERE
    // NOTE(review): iter is a long but image_dft takes an int parameter.
    image_dft(uvgrid, grid_size, lambda, &vis,iter);
    struct timespec end_time;
    clock_gettime(CLOCK_REALTIME, &end_time);
    printf("\nGrid-Time: %.3f",
           (double)(end_time.tv_sec - start_time.tv_sec) +
           (double)(end_time.tv_nsec - start_time.tv_nsec) / 1000000000);
    //Lets get some memory stats:
    getrusage(RUSAGE_SELF, rusage_cp);
    printf("\nMaximum Grid Memory: %.2f GB", (float)rusage_cp->ru_maxrss/(1024*1024));
    // Show performance counters after gridding
    printf("\nCounters:\n");
    print_perf_counters(&counters, flops, mem);
    // Make hermitian
    printf("\nMake hermitian...\n");
    make_hermitian(uvgrid, grid_size);
    if (image_fd != -1) {
        printf("FFT...\n");
        // First shift zero frequency
        fft_shift(uvgrid, grid_size);
        // Do DFT. Complex-to-complex to keep with numpy (TODO: optimize)
        fftw_plan plan;
        plan = fftw_plan_dft_2d(grid_size, grid_size, uvgrid, uvgrid, -1, FFTW_ESTIMATE);
        fftw_execute_dft(plan, uvgrid, uvgrid);
        // Shift zero frequency back into centre
        fft_shift(uvgrid, grid_size);
        // Write real part to disk
        printf("Write image...\n");
        int i;
        double *row = malloc(sizeof(double) * grid_size);
        for (i = 0; i < grid_size; i++) {
            int j;
            for (j = 0; j < grid_size; j++) {
                row[j] = creal(uvgrid[i*grid_size+j]);
            }
            write(image_fd, row, sizeof(double) * grid_size);
        }
        close(image_fd);
    }
    // NOTE(review): rusage_cp, uvgrid, row and the FFTW plan are never
    // released; harmless at process exit but worth tidying.
    getrusage(RUSAGE_SELF, rusage_cp);
    printf("\nMax Memory: %.2f GB", (float)rusage_cp->ru_maxrss/(1024*1024));
    return 0;
}
|
ofmo-os-xxxx.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "ofmo-index.h"
#include "ofmo-twoint.h"
#ifdef _OPENMP
#include <omp.h>
#else
#include "omp-dummy.h"
#endif
extern void fmt( double F[],
const int m, const double T, const double cssss );
extern int ofmo_integ_add_fock( const int nao, const size_t nstored_eri,
const double eri_val[], const short int eri_ind4[],
const double D[], double G[] );
extern int ofmo_twoint_core_rys_xxxx(
const int mythread,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int *nijps, const double vzeta[], const double vdkab[],
const double vxiza[], const double BA[3],
const int *nklps, const double veta[], const double vdkcd[],
const double vxizc[], const double DC[3], const double AC[3],
double DINT[] );
#ifndef false
#define false 0
#endif
#ifndef true
#define true 1
#endif
#define HALF 0.5e0
#define ZERO 0.e0
#define ONE 1.e0
#define OFMO_EBUF_FULL 1
#define OFMO_EBUF_NOFULL 0
#define MAXNPSPAIR 100
#define EPS_PS_PAIR 1.e-32
#define EPS_CS_PAIR2 1.e-30
// Allocate an na x nb integer matrix as one contiguous data block plus
// a row-pointer table (so ip[i][j] and ip[0][i*nb+j] alias).
// Returns NULL, with nothing leaked, if either allocation fails.
static int** ofmo_alloc_imatrix( const int na, const int nb ) {
    int **ip, i;
    ip = (int**)malloc( sizeof(int*) * na );
    if ( ip == NULL ) return NULL;
    ip[0] = (int*)malloc( sizeof(int) * na * nb );
    if ( ip[0] == NULL ) { free( ip ); return NULL; }
    for (i=1; i<na; i++ ) ip[i] = ip[i-1] + nb;
    return ip;
}
// Free a matrix created by ofmo_alloc_imatrix: data block first, then
// the row-pointer table.  A NULL argument is ignored.
static void ofmo_free_imatrix( int** ip ) {
    if ( ip == NULL ) return;
    free( ip[0] );   /* free(NULL) is a no-op, so no inner guard needed */
    free( ip );
}
// Allocate a 4-D integer array [na][nb][nc][nd] as four flat blocks
// (one per indirection level) with stitched pointer tables, so the
// int data itself is one contiguous run starting at ip[0][0][0].
// Returns NULL, with nothing leaked, if any allocation fails.
static int**** ofmo_alloc_i4d( int na, int nb, int nc, int nd ) {
    int ****ip, i, j, k;
    ip = (int****)malloc( sizeof(int***) * na );
    if ( ip == NULL ) return NULL;
    ip[0] = (int***)malloc( sizeof(int**) * na * nb );
    if ( ip[0] == NULL ) { free( ip ); return NULL; }
    ip[0][0] = (int**)malloc( sizeof(int*) * na * nb * nc );
    if ( ip[0][0] == NULL ) { free( ip[0] ); free( ip ); return NULL; }
    ip[0][0][0] = (int* )malloc( sizeof(int ) * na * nb * nc * nd );
    if ( ip[0][0][0] == NULL ) {
        free( ip[0][0] ); free( ip[0] ); free( ip );
        return NULL;
    }
    // stitch the level-1 table (int*** rows)
    for ( i=1; i<na; i++ ) ip[i] = ip[i-1] + nb;
    // stitch the level-2 table (int** rows)
    for ( j=1; j<nb; j++ ) ip[0][j] = ip[0][j-1] + nc;
    for ( i=1; i<na; i++ ) {
        for ( j=0; j<nb; j++ ) ip[i][j] = ip[i-1][j] + nb * nc;
    }
    // stitch the level-3 table (int* rows into the data block)
    for ( k=1; k<nc; k++ ) ip[0][0][k] = ip[0][0][k-1] + nd;
    for ( j=1; j<nb; j++ ) {
        for ( k=0; k<nc; k++ ) ip[0][j][k] = ip[0][j-1][k] + nc * nd;
    }
    for ( i=1; i<na; i++ ) {
        for ( j=0; j<nb; j++ ) {
            for ( k=0; k<nc; k++ )
                ip[i][j][k] = ip[i-1][j][k] + nb * nc * nd;
        }
    }
    return ip;
}
// Release a 4-D array built by ofmo_alloc_i4d: innermost data block
// first, then each pointer-table level.  NULL at any level is tolerated.
static void ofmo_free_i4d( int**** ip ) {
    if ( ip == NULL ) return;
    if ( ip[0] != NULL ) {
        if ( ip[0][0] != NULL ) {
            free( ip[0][0][0] );   /* free(NULL) is safe */
            free( ip[0][0] );
        }
        free( ip[0] );
    }
    free( ip );
}
/* Index tables produced by ofmo-index.c (fetched once in the init
   routines below, read-only afterwards). */
static int *NNAO;
static int *LAOT;
static int *INDX;
static int **ANGM;
static int **NAM;
static int **NAP;
static double *DFACT;
/* Per-thread state used by the VRR routines */
static int ***V_VADD = NULL;
static int ***V_Mindex = NULL;
static int ***V_KLindex = NULL;
static int ***V_IJindex = NULL;
static int ***V_Mmin = NULL;
static double **V_ev = NULL;
/* Per-thread state used by the HRR step */
static int *****V_HADD = NULL;
static double **V_eh = NULL;
/* Per-thread storage for the contracted molecular integrals */
static double **DINTEG_MASTER = NULL;
/* Cutoff-table related constant.
   NOTE(review): "_CK_" (leading underscore + capital) is a reserved
   identifier in C; consider renaming. */
static double _CK_;
// Set up the address bookkeeping (offsets, strides, minimum m values)
// used by the VRR step for this thread, and return the total number of
// doubles the per-thread VRR work array must hold.
static int ofmo_vrr_make_add(
        const int mythread,
        const int La, const int Lb, const int Lc, const int Ld,
        const int mmajor
        ) {
    int vrr_mem, Lab, Lcd, Labcd, mab, mcd;
    int imin, mmin, mmax, nm;
    int **Mindex, **KLindex, **IJindex, **Mmin, **VADD;
    Mindex = V_Mindex[mythread];
    KLindex = V_KLindex[mythread];
    IJindex = V_IJindex[mythread];
    Mmin = V_Mmin[mythread];
    VADD = V_VADD[mythread];
    Lab = La + Lb;
    Lcd = Lc + Ld;
    Labcd = Lab + Lcd;
    vrr_mem = 0;
    // (xs,ss) type: element address is
    // VADD + m*Mindex[mab][mcd] + (iao-iao0)*IJindex[mab][mcd]
    mcd = 0;
    for ( mab=0; mab<=Lab; mab++ ) {
        if ( mmajor ) {
            Mindex[mab][mcd] = NNAO[mab];
            KLindex[mab][mcd] = 0;
            IJindex[mab][mcd] = 1;
        } else {
            Mindex[mab][mcd] = 1;
            KLindex[mab][mcd] = 0;
            IJindex[mab][mcd] = (Labcd - mab + 1);
        }
        Mmin[mab][mcd] = 0;
        VADD[mab][mcd] = vrr_mem;
        vrr_mem += (Labcd - mab + 1) * NNAO[mab];
    }
    // (xs,ys) type: element address is
    // VADD + m*Mindex[mab][mcd] + (kao-kao0)*KLindex[mab][mcd]
    //      + (iao-iao0)*IJindex[mab][mcd]
    for ( mcd=1; mcd<=Lcd; mcd++ ) {
        mmax = Lcd - mcd;
        imin = La - Lcd + mcd;
        if ( imin < 0 ) imin = 0;
        for ( mab=imin; mab<=Lab; mab++ ) {
            mmin = La - mab;
            if ( mmin < 0 ) mmin = 0;
            Mmin[mab][mcd] = mmin;
            nm = mmax - mmin + 1;
            if ( mmajor ) {
                Mindex[mab][mcd] = NNAO[mab] * NNAO[mcd];
                KLindex[mab][mcd] = 1;
                IJindex[mab][mcd] = NNAO[mcd];
            } else {
                Mindex[mab][mcd] = 1;
                KLindex[mab][mcd] = nm;
                IJindex[mab][mcd] = nm * NNAO[mcd];
            }
            VADD[mab][mcd] = vrr_mem;
            vrr_mem += nm * NNAO[mab] * NNAO[mcd];
        }
    }
    return vrr_mem;
}
// Set up the offsets into the per-thread HRR work array for every
// (ma,mb,mc,md) angular-momentum quartet, and return the total number
// of doubles the array must hold.
static int ofmo_hrr_make_add(
        const int mythread,
        const int La, const int Lb, const int Lc, const int Ld ) {
    int hrr_mem, ma, mb, mc, md;
    int na, nb, nab, nabd;
    int Lab, Lcd;
    int ****HADD;
    HADD = V_HADD[mythread];
    Lab = La + Lb;
    Lcd = Lc + Ld;
    hrr_mem = 0;
    // addresses of the contracted integrals produced by the VRR step
    mb = md = 0;
    for ( ma=La; ma<=Lab; ma++ ) {
        na = NNAO[ma];
        for ( mc=Lc; mc<=Lcd; mc++ ) {
            HADD[ma][mb][mc][md] = hrr_mem;
            hrr_mem += ( na*NNAO[mc] );
        }
    }
    // addresses for the HRR applied to the AB pair
    md = 0;
    for ( mb=1; mb<=Lb; mb++ ) {
        nb = NNAO[mb];
        for ( ma=La; ma<=(Lab-mb); ma++ ) {
            nab = nb * NNAO[ma];
            for ( mc=Lc; mc<=Lcd; mc++ ) {
                HADD[ma][mb][mc][md] = hrr_mem;
                hrr_mem += ( nab * NNAO[mc]);
            }
        }
    }
    // addresses for the HRR applied to the CD pair
    ma = La;
    mb = Lb;
    nab = NNAO[La]*NNAO[Lb];
    for ( md=1; md<=Ld; md++ ) {
        nabd = nab*NNAO[md];
        for ( mc=Lc; mc<=(Lcd-md); mc++ ) {
            HADD[ma][mb][mc][md] = hrr_mem;
            hrr_mem += nabd * NNAO[mc];
        }
    }
    return hrr_mem;
}
// Release every per-thread VRR work area and reset the globals to NULL.
// Registered via atexit() in ofmo_vrr_init().
static void ofmo_vrr_finalize() {
    int nthreads, t;
    nthreads = omp_get_max_threads();
    /* ofmo_free_imatrix() checks for NULL and free(NULL) is a no-op,
       so no per-slot guards are needed */
    for ( t=0; t<nthreads; t++ ) {
        ofmo_free_imatrix( V_VADD[t] );
        ofmo_free_imatrix( V_Mmin[t] );
        ofmo_free_imatrix( V_Mindex[t] );
        ofmo_free_imatrix( V_IJindex[t] );
        ofmo_free_imatrix( V_KLindex[t] );
        free( V_ev[t] );
        free( DINTEG_MASTER[t] );
    }
    free( V_VADD );        V_VADD = NULL;
    free( V_Mmin );        V_Mmin = NULL;
    free( V_Mindex );      V_Mindex = NULL;
    free( V_IJindex );     V_IJindex = NULL;
    free( V_KLindex );     V_KLindex = NULL;
    free( V_ev );          V_ev = NULL;
    free( DINTEG_MASTER ); DINTEG_MASTER = NULL;
}
/* One-time VRR initialisation (guarded by a static flag; call before any
 * VRR work).  Fetches the shared index tables from ofmo-index.c, then has
 * every OpenMP thread allocate its own address tables and work arrays,
 * sized for the worst case of maxlqn on all four centers.
 * NOTE(review): malloc results are not checked here. */
static int ofmo_vrr_init( const int maxlqn ) {
    int Lab;
    int nthreads;
    static int called = false;
    if ( called ) return 0;
    ofmo_index_init( 2*maxlqn );
    NNAO = ofmo_getadd_nnao();
    LAOT = ofmo_getadd_laot();
    ANGM = ofmo_getadd_angm();
    INDX = ofmo_getadd_indx();
    NAM = ofmo_getadd_nam();
    Lab = maxlqn + maxlqn;
    nthreads = omp_get_max_threads();
    V_VADD = (int***)malloc( sizeof(int**) * nthreads );
    V_Mindex = (int***)malloc( sizeof(int**) * nthreads );
    V_KLindex = (int***)malloc( sizeof(int**) * nthreads );
    V_IJindex = (int***)malloc( sizeof(int**) * nthreads );
    V_Mmin = (int***)malloc( sizeof(int**) * nthreads );
    V_ev = (double**)malloc( sizeof(double*) * nthreads );
    DINTEG_MASTER = (double**)malloc( sizeof(double*) * nthreads );
#pragma omp parallel
    {
        int mythread, vrr_mem, n, n4;
        /* n4 = size of a full contracted-integral quartet buffer */
        n = NNAO[maxlqn];
        n4 = n*n*n*n;
        mythread = omp_get_thread_num();
        V_VADD[mythread] = ofmo_alloc_imatrix( Lab+1, Lab+1 );
        V_Mmin[mythread] = ofmo_alloc_imatrix( Lab+1, Lab+1 );
        V_Mindex[mythread] = ofmo_alloc_imatrix( Lab+1, Lab+1 );
        V_KLindex[mythread] = ofmo_alloc_imatrix( Lab+1, Lab+1 );
        V_IJindex[mythread] = ofmo_alloc_imatrix( Lab+1, Lab+1 );
        vrr_mem = ofmo_vrr_make_add( mythread,
                maxlqn, maxlqn, maxlqn, maxlqn, false );
        V_ev[mythread] = (double*)malloc( sizeof(double) * vrr_mem );
        DINTEG_MASTER[mythread] = (double*)malloc( sizeof(double) * n4 );
    }
    /* release everything automatically at program exit */
    atexit( ofmo_vrr_finalize );
    called = true;
    return 0;
}
// Release every per-thread HRR work area and reset the globals to NULL.
// Registered via atexit() in ofmo_hrr_init().
static void ofmo_hrr_finalize() {
    int t, nthreads;
    nthreads = omp_get_max_threads();
    for ( t=0; t<nthreads; t++ ) {
        free( V_eh[t] );             /* free(NULL) is a no-op */
        ofmo_free_i4d( V_HADD[t] );  /* NULL-safe */
    }
    free( V_eh );
    free( V_HADD );
    V_eh = NULL;
    V_HADD = NULL;
}
// One-time HRR initialisation (guarded by a static flag).  Each OpenMP
// thread allocates its own HADD offset table and work array, sized for
// the worst case of maxlqn on all four centers.
// NOTE(review): malloc results are not checked here.
static int ofmo_hrr_init( const int maxlqn ) {
    int nthreads;
    static int called = false;
    if ( called ) return 0;
    nthreads = omp_get_max_threads();
    V_HADD = (int*****)malloc( sizeof(int****) * nthreads );
    V_eh = (double**)malloc( sizeof(double*) * nthreads );
    NAP = ofmo_getadd_nap();
    DFACT = ofmo_getadd_dfact();
    /* release everything automatically at program exit */
    atexit( ofmo_hrr_finalize );
#pragma omp parallel
    {
        int mythread, hrr_mem;
        mythread = omp_get_thread_num();
        V_HADD[mythread] =
            ofmo_alloc_i4d( 2*maxlqn+1, maxlqn+1, 2*maxlqn+1, maxlqn+1);
        hrr_mem = ofmo_hrr_make_add( mythread,
                maxlqn, maxlqn, maxlqn, maxlqn );
        V_eh[mythread] = (double*)malloc( sizeof(double) * hrr_mem );
    }
    called = true;
    return 0;
}
// Accessors that expose the per-thread work areas allocated by the init
// routines above.
double* ofmo_os_getadd_eri( const int mythread ) {
    // contracted-integral (ERI) buffer for this thread
    return DINTEG_MASTER[mythread];
}
double* ofmo_os_getadd_vrr( const int mythread ) {
    // VRR work array for this thread
    return V_ev[mythread];
}
double* ofmo_os_getadd_hrr( const int mythread ) {
    // HRR work array for this thread
    return V_eh[mythread];
}
// Run the vertical recurrence relations (VRR) for a single primitive
// integral quartet; no contraction is performed here.  The intermediate
// (xs,ss) and (xs,ys) classes are written into the per-thread work
// array V_ev at the offsets prepared by ofmo_vrr_make_add().
static int ofmo_vrr_calc(
        const int mythread,
        const int La, const int Lb, const int Lc, const int Ld,
        const double T, const double cssss,
        const double zeta2, const double eta2, const double ze2,
        const double rz, const double re,
        const double PA[3], const double WP[3],
        const double QC[3], const double WQ[3] ) {
    int mab, mcd, mmax, m, ix;
    int Lab, Lcd, Labcd, Lmin;
    int iao, iao0, iao1, iaom, iaom0, iaomm, iaomm0;
    int jao, jao0, jao1, jaom, jaom0, jaomm, jaomm0;
    int Ip00, I000, I001, I100, I101, I011;
    int I0p0, I010;
    int nia, nic;
    int **Mindex, **KLindex, **IJindex, **Mmin, **VADD;
    double *ev;
    Mindex = V_Mindex[mythread];
    KLindex = V_KLindex[mythread];
    IJindex = V_IJindex[mythread];
    Mmin = V_Mmin[mythread];
    VADD = V_VADD[mythread];
    ev = V_ev[mythread];
    Lab = La + Lb;
    Lcd = Lc + Ld;
    Labcd = Lab + Lcd;
    // (ss,ss): seed with the Boys-function values F_m(T) scaled by cssss
    fmt( &ev[VADD[0][0]], Labcd, T, cssss );
    // (xs,ss) (x>=p): raise angular momentum on the bra side
    for ( mab=1; mab<=Lab; mab++ ) {
        mmax = Labcd - mab;
        iao0 = LAOT[mab];
        iao1 = iao0 + NNAO[mab];
        iaom0 = LAOT[mab-1];
        if ( mab > 1 ) iaomm0 = LAOT[mab-2];
        for ( m=0; m<=mmax; m++ ) {
            for ( iao=iao0; iao<iao1; iao++ ) {
                ix = INDX[iao];
                iaom = NAM[iao][ix];
                nia = ANGM[iaom][ix];
                Ip00 = VADD[mab][0]
                    + m*Mindex[mab][0] + (iao-iao0)*IJindex[mab][0];
                I000 = VADD[mab-1][0]
                    + m*Mindex[mab-1][0] + (iaom-iaom0)*IJindex[mab-1][0];
                I001 = I000 + Mindex[mab-1][0];
                // leading two-term recurrence
                ev[Ip00] = PA[ix]*ev[I000] + WP[ix]*ev[I001];
                if ( nia > 0 ) {
                    // contribution from lowering twice on the bra side
                    iaomm = NAM[iaom][ix];
                    I100 = VADD[mab-2][0]
                        + m*Mindex[mab-2][0]
                        + (iaomm-iaomm0)*IJindex[mab-2][0];
                    I101 = I100 + Mindex[mab-2][0];
                    ev[Ip00] += (double)nia*zeta2*(ev[I100] - rz*ev[I101] );
                }
            }
        }
    }
    // (xs,ys) (y>=p): raise angular momentum on the ket side
    for ( mcd=1; mcd<=Lcd; mcd++ ) {
        mmax = Lcd - mcd;
        jao0 = LAOT[mcd];
        jao1 = jao0 + NNAO[mcd];
        jaom0 = LAOT[mcd-1];
        if ( mcd > 1 ) jaomm0 = LAOT[mcd-2];
        Lmin = La - (Lcd-mcd);
        if ( Lmin < 0 ) Lmin = 0;
        for ( mab=Lmin; mab<=Lab; mab++ ) {
            iao0 = LAOT[mab];
            iao1 = iao0 + NNAO[mab];
            if ( mab>0 ) iaom0 = LAOT[mab-1];
            for ( m=Mmin[mab][mcd]; m<=mmax; m++ ) {
                for ( iao=iao0; iao<iao1; iao++ ) {
                    for ( jao=jao0; jao<jao1; jao++ ) {
                        ix = INDX[jao];
                        jaom = NAM[jao][ix];
                        I0p0 = VADD[mab][mcd]
                            + (m-Mmin[mab][mcd])*Mindex[mab][mcd]
                            + (jao-jao0)*KLindex[mab][mcd]
                            + (iao-iao0)*IJindex[mab][mcd];
                        I000 = VADD[mab][mcd-1]
                            + (m-Mmin[mab][mcd-1])*Mindex[mab][mcd-1]
                            + (jaom-jaom0)*KLindex[mab][mcd-1]
                            + (iao-iao0)*IJindex[mab][mcd-1];
                        I001 = I000 + Mindex[mab][mcd-1];
                        // leading two-term recurrence
                        ev[I0p0] = QC[ix]*ev[I000] + WQ[ix]*ev[I001];
                        nic = ANGM[jaom][ix];
                        if ( nic > 0 ) {
                            // contribution from lowering twice on the ket side
                            jaomm = NAM[jaom][ix];
                            I010 = VADD[mab][mcd-2]
                                + (m-Mmin[mab][mcd-2])*Mindex[mab][mcd-2]
                                + (jaomm - jaomm0) * KLindex[mab][mcd-2]
                                + (iao-iao0) * IJindex[mab][mcd-2];
                            I011 = I010 + Mindex[mab][mcd-2];
                            ev[I0p0] +=
                                (double)nic*eta2*(ev[I010] - re*ev[I011]);
                        }
                        nia = ANGM[iao][ix];
                        if ( nia > 0 ) {
                            // cross term: lower once on each side
                            iaom = NAM[iao][ix];
                            I101 = VADD[mab-1][mcd-1]
                                + (m+1-Mmin[mab-1][mcd-1])*Mindex[mab-1][mcd-1]
                                + (jaom-jaom0) * KLindex[mab-1][mcd-1]
                                + (iaom-iaom0) * IJindex[mab-1][mcd-1];
                            ev[I0p0] += (double)nia*ze2*ev[I101];
                        }
                    }
                }
            }
        }
    }
    return 0;
}
/** HRRを行う関数
* 外部変数
* LAOT[CS type] = CSに含まれる先頭AO番号
* NNAO[CS type] = CSに含まれるAO数
* INDX[AO type] = 添字
* HADD[ma][mb][mc][md] = 各軌道量子数4重対の先頭アドレス
* eh[] = HRRに関連する縮約積分保存に使用する配列
* */
/* Apply the horizontal recurrence relation (HRR) to the contracted
 * integrals stored in this thread's eh[] buffer.
 * First pass raises angular momentum on B using
 *   (a,b+1| = (a+1,b| - BA[ix]*(a,b|
 * then a second pass raises D using the analogous CD-side relation.
 * The fully transformed (La Lb|Lc Ld) block ends up at
 * eh[HADD[La][Lb][Lc][Ld]].
 *
 * mythread : thread slot selecting the per-thread HADD/eh work areas
 * La..Ld   : angular momenta of the four shells
 * BA, DC   : B-A and D-C displacement vectors
 * Returns 0 always.
 */
static int ofmo_hrr_calc(
const int mythread,
const int La, const int Lb, const int Lc, const int Ld,
const double BA[3], const double DC[3]
) {
int ma, mb, mc, md;
int Lab, Lcd;
int ix;
int iao, iao0, iao1, iaop, iaop0;
int jao, jao0, jao1, jaom, jaom0;
int kao, kao0, kao1, kaop, kaop0, k;
int lao, lao0, lao1, laom, laom0;
int add01, add10, add00;
int I01, I10, I00, IJ01, IJ10, IJ00, IJK01, IJK00;
int IJKL01, IJKL10, IJKL00;
double *d01, *d10, *d00;
int ****HADD;
double *eh;
HADD = V_HADD[mythread];
eh = V_eh[mythread];
Lab = La + Lb;
Lcd = Lc + Ld;
// HRR on the AB side: raise mb from 1 up to Lb, consuming one unit of
// angular momentum from the (ma+1) class at each step.
for ( mb=1; mb<=Lb; mb++ ) {
jao0 = LAOT[mb];
jao1 = jao0 + NNAO[mb];
jaom0 = LAOT[mb-1];
for ( ma=La; ma<=(Lab-mb); ma++ ) {
iao0 = LAOT[ma];
iao1 = iao0 + NNAO[ma];
iaop0 = LAOT[ma+1];
for ( mc=Lc; mc<=Lcd; mc++ ) {
kao0 = LAOT[mc];
kao1 = kao0 + NNAO[mc];
// Leading addresses of the target (01), raised (10), and lowered (00)
// classes inside eh[].
add01 = HADD[ma ][mb ][mc][0];
add10 = HADD[ma+1][mb-1][mc][0];
add00 = HADD[ma ][mb-1][mc][0];
for ( iao=iao0; iao<iao1; iao++ ) {
I01 = add01 + (iao-iao0)*NNAO[mb ]*NNAO[mc];
I00 = add00 + (iao-iao0)*NNAO[mb-1]*NNAO[mc];
for ( jao=jao0; jao<jao1; jao++ ) {
// ix: Cartesian direction along which jao is lowered / iao raised.
ix = INDX[jao];
jaom = NAM[jao][ix];
iaop = NAP[iao][ix];
IJ01 = I01 + (jao-jao0)*NNAO[mc];
IJ10 = add10 + (iaop-iaop0)*NNAO[mb-1]*NNAO[mc]
+ (jaom-jaom0)*NNAO[mc];
IJ00 = I00 + (jaom-jaom0)*NNAO[mc];
d01 = &eh[IJ01];
d10 = &eh[IJ10];
d00 = &eh[IJ00];
// (a,b+1| = (a+1,b| - BA*(a,b| applied over the whole mc row.
for ( kao=kao0, k=0; kao<kao1; kao++, k++ )
d01[k] = d10[k] - BA[ix]*d00[k];
}
}
} // for (mc)
} // for (ma)
} // for (mb);
// HRR on the CD side: ma/mb are now fixed at the targets La/Lb.
ma = La;
mb = Lb;
iao0 = LAOT[ma];
iao1 = iao0 + NNAO[ma];
jao0 = LAOT[mb];
jao1 = jao0 + NNAO[mb];
for ( md=1; md<=Ld; md++ ) {
lao0 = LAOT[md];
lao1 = lao0 + NNAO[md];
laom0 = LAOT[md-1];
for ( mc=Lc; mc<=(Lcd-md); mc++ ) {
kao0 = LAOT[mc];
kao1 = kao0 + NNAO[mc];
kaop0 = LAOT[mc+1];
add01 = HADD[ma][mb][mc ][md ];
add10 = HADD[ma][mb][mc+1][md-1];
add00 = HADD[ma][mb][mc ][md-1];
for ( iao=iao0; iao<iao1; iao++ ) {
I01 = add01 + (iao-iao0)*NNAO[mb]*NNAO[mc ]*NNAO[md ];
I10 = add10 + (iao-iao0)*NNAO[mb]*NNAO[mc+1]*NNAO[md-1];
I00 = add00 + (iao-iao0)*NNAO[mb]*NNAO[mc ]*NNAO[md-1];
for ( jao=jao0; jao<jao1; jao++ ) {
IJ01 = I01 + (jao-jao0)*NNAO[mc ]*NNAO[md ];
IJ10 = I10 + (jao-jao0)*NNAO[mc+1]*NNAO[md-1];
IJ00 = I00 + (jao-jao0)*NNAO[mc ]*NNAO[md-1];
for ( kao=kao0; kao<kao1; kao++ ) {
IJK01 = IJ01 + (kao-kao0)*NNAO[md ];
IJK00 = IJ00 + (kao-kao0)*NNAO[md-1];
for ( lao=lao0; lao<lao1; lao++ ) {
ix = INDX[lao];
laom = NAM[lao][ix];
kaop = NAP[kao][ix];
IJKL01 = IJK01 + (lao-lao0);
IJKL10 = IJ10
+ (kaop-kaop0)*NNAO[md-1] + (laom-laom0);
IJKL00 = IJK00 + (laom-laom0);
// |c,d+1) = |c+1,d) - DC*|c,d)
eh[IJKL01] = eh[IJKL10] - DC[ix]*eh[IJKL00];
} // for (lao)
} // for (kao)
} // for (jao)
} // for (iao)
} // for (mc)
} // for (md)
return 0;
}
static int ofmo_hrr_clear(
const int mythread,
const int La, const int Lb, const int Lc, const int Ld ) {
int mab, mcd, Lab, Lcd;
int nab, nabcd, i;
int ****HADD;
double *th, *eh;
eh = V_eh[mythread];
HADD = V_HADD[mythread];
Lab = La + Lb;
Lcd = Lc + Ld;
for ( mab=La; mab<=Lab; mab++ ) {
nab = NNAO[mab];
for ( mcd=Lc; mcd<=Lcd; mcd++ ) {
nabcd = nab * NNAO[mcd];
th = &eh[ HADD[mab][0][mcd][0] ];
for ( i=0; i<nabcd; i++ ) th[i] = 0.e0;
}
}
return 0;
}
/* Accumulate the primitive VRR results (ev) into the contracted HRR
 * buffer (eh) for every angular-momentum class of the current quartet.
 * Called once per primitive pair combination.
 * Returns 0 always. */
static int ofmo_vrr_cint(
const int mythread,
const int La, const int Lb, const int Lc, const int Ld ) {
    double *ev   = V_ev[mythread];
    double *eh   = V_eh[mythread];
    int **VADD   = V_VADD[mythread];
    int ****HADD = V_HADD[mythread];
    const int mab_end = La + Lb;
    const int mcd_end = Lc + Ld;
    for ( int mab = La; mab <= mab_end; mab++ ) {
        const int nrow = NNAO[mab];
        for ( int mcd = Lc; mcd <= mcd_end; mcd++ ) {
            const int ntot = nrow * NNAO[mcd];
            const double *src = &ev[ VADD[mab][mcd] ];
            double *dst = &eh[ HADD[mab][0][mcd][0] ];
            for ( int k = 0; k < ntot; k++ ) dst[k] += src[k];
        }
    }
    return 0;
}
/* Copy the fully HRR-transformed (La Lb|Lc Ld) block out of eh[] into
 * DINT[], scaling each element by the product of the four AO
 * normalization factors DFACT[iao]*DFACT[jao]*DFACT[kao]*DFACT[lao].
 * Output ordering is i (slowest), j, k, l (fastest).
 * Returns 0 always. */
static int ofmo_hrr_coef(
const int mythread,
const int La, const int Lb, const int Lc, const int Ld,
double DINT[] ) {
    double *eh   = V_eh[mythread];
    int ****HADD = V_HADD[mythread];
    const double *src = &eh[ HADD[La][Lb][Lc][Ld] ];
    const int iao0 = LAOT[La], jao0 = LAOT[Lb];
    const int kao0 = LAOT[Lc], lao0 = LAOT[Ld];
    int pos = 0;
    for ( int i = 0; i < NNAO[La]; i++ ) {
        const double ca = DFACT[iao0+i];
        for ( int j = 0; j < NNAO[Lb]; j++ ) {
            const double cab = ca * DFACT[jao0+j];
            for ( int k = 0; k < NNAO[Lc]; k++ ) {
                const double cabc = cab * DFACT[kao0+k];
                for ( int l = 0; l < NNAO[Ld]; l++, pos++ ) {
                    DINT[pos] = cabc * DFACT[lao0+l] * src[pos];
                }
            }
        }
    }
    return 0;
}
/* Compute the contracted two-electron integrals (ab|cd) for a single
 * quartet of contracted shells via VRR + HRR.
 * Loops over all primitive pairs of the bra (ijps) and ket (klps),
 * builds the Obara-Saika quantities for each primitive quartet, runs
 * the vertical recurrence (ofmo_vrr_calc), accumulates into the
 * contracted buffer (ofmo_vrr_cint), then applies the horizontal
 * recurrence and normalization to produce DINT[].
 *
 * NOTE(review): as generated by schwarz_calc_ps_pair_params, vzeta[]
 * and veta[] hold 1/(exponent sum) for each pair, and vdkab/vdkcd hold
 * the pair prefactors Kab/Kcd — the names here are the reciprocal of
 * the usual convention; the algebra below is written for that storage.
 *
 * BA = B-A, DC = D-C, AC = A-C; DINT receives na*nb*nc*nd doubles.
 * Returns 0 always. */
static int ofmo_twoint_core_xxxx(
const int mythread,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int *nijps, const double vzeta[], const double vdkab[],
const double vxiza[], const double BA[3],
const int *nklps, const double veta[], const double vdkcd[],
const double vxizc[], const double DC[3], const double AC[3],
double DINT[] ) {
int ijps, klps, i;
double cssss, zeta, dkab, xiza, eta, xizc, dk, T;
double zeta2, eta2, ze2, rz, re, PA[3], WP[3], QC[3], WQ[3];
double PQ2, sqrho, rho, PC[3], QP[3];
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
// Zero the contraction buffer before accumulating primitives.
ofmo_hrr_clear( mythread, La, Lb, Lc, Ld );
for ( ijps=0; ijps<(*nijps); ijps++ ) {
zeta = vzeta[ijps];
dkab = vdkab[ijps];
xiza = vxiza[ijps];
zeta2 = HALF * zeta;
for ( i=0; i<3; i++ ) {
// P = A + xiza*(B-A); PC = P - C, PA = P - A.
PC[i] = AC[i] + xiza*BA[i];
PA[i] = xiza * BA[i];
}
for ( klps=0; klps<(*nklps); klps++ ) {
eta = veta[klps];
dk = dkab * vdkcd[klps];
xizc = vxizc[klps];
eta2 = HALF * eta;
PQ2 = ZERO;
for ( i=0; i<3; i++ ) {
// Q = C + xizc*(D-C); QC = Q - C, QP = Q - P.
QC[i] = xizc*DC[i];
QP[i] = xizc*DC[i] - PC[i];
PQ2 += QP[i]*QP[i];
}
sqrho = sqrt(1.e0/(zeta+eta));
rho = sqrho*sqrho;
rz = rho * zeta;
re = rho * eta;
ze2 = re * zeta2;
for ( i=0; i<3; i++ ) {
WP[i] = rz*QP[i];
WQ[i] = rz*QP[i] - QP[i];
}
// T is the Boys-function argument; cssss the (ss|ss) prefactor.
T = rho * PQ2;
cssss = sqrho * dk;
ofmo_vrr_calc( mythread, La, Lb, Lc, Ld,
T, cssss, zeta2, eta2, ze2, rz, re, PA, WP, QC, WQ );
// Accumulate this primitive quartet into the contracted buffer.
ofmo_vrr_cint( mythread, La, Lb, Lc, Ld );
}
}
// Shift angular momentum A->B and C->D, then apply normalization.
ofmo_hrr_calc( mythread, La, Lb, Lc, Ld, BA, DC );
ofmo_hrr_coef( mythread, La, Lb, Lc, Ld, DINT );
return 0;
}
/* Build screened primitive-pair parameters for one contracted shell pair.
 * For every primitive pair (ips, jps) whose Gaussian-product prefactor
 * Kab survives the EPS_PS_PAIR screen, store:
 *   vzeta[] = 1/(zeta_a+zeta_b)
 *   vdkps[] = Kab = _CK_ * ca*cb * zeta^{-3/2} * exp(-xiza*zeta_a*AB2)
 *   vxiza[] = zeta_b/(zeta_a+zeta_b)
 * AB2 is |B-A|^2.  Returns the number of surviving pairs. */
static int schwarz_calc_ps_pair_params(
const double prim_exp[], const double prim_coe[],
const int ips0, const int ips1,
const int jps0, const int jps1,
const double AB2,
double vzeta[], double vdkps[], double vxiza[] ) {
    int count = 0;
    for ( int ips = ips0; ips < ips1; ips++ ) {
        const double za = prim_exp[ips];
        const double ca = prim_coe[ips];
        for ( int jps = jps0; jps < jps1; jps++ ) {
            const double zb  = prim_exp[jps];
            const double cc  = ca * prim_coe[jps];
            const double sum = za + zb;
            const double sqrz = 1.e0 / sqrt( sum );
            const double rz   = sqrz * sqrz;      /* 1/(za+zb) */
            const double xiza = zb * rz;
            const double Kab  =
                _CK_ * cc * sqrz * rz * exp( -xiza * za * AB2 );
            if ( fabs(Kab) > EPS_PS_PAIR ) {
                vzeta[count] = rz;
                vdkps[count] = Kab;
                vxiza[count] = xiza;
                count++;
            }
        }
    }
    return count;
}
/* Schwarz screening kernel: compute the largest diagonal integral
 * max_{i,j} |(ij|ij)| for one contracted shell pair of type (La,Lb).
 * Reuses the general VRR/HRR machinery with the ket set equal to the
 * bra, i.e. (c,d) = (a,b), so DC = BA and all pair tables are the
 * ij-pair tables.  The returned value is the (unsquared) maximum,
 * compared against EPS_CS_PAIR2 by the caller. */
static double ofmo_schwarz_core_xxxx(
const int mythread,
const int *pLa, const int *pLb,
const int *nijps, const double vzeta[], const double vdkab[],
const double vxiza[], const double BA[3], const double AB2 ) {
int ijps, klps, i;
double cssss, zeta, dkab, xiza, eta, xizc, dk, T, dxi;
double zeta2, eta2, ze2, rz, re, PA[3], WP[3], QC[3], WQ[3];
double PQ2, sqrho, rho, QP[3];
int La=*pLa, Lb=*pLb, Lc=*pLa, Ld=*pLb;
double *DINTEG;
int na, nb, nab1, j, ij, i0;
double dmaxint;
na = NNAO[La];
nb = NNAO[Lb];
// Stride nab1 = na*nb+1 walks the "diagonal" of the na*nb x na*nb
// integral block, i.e. the (ij|ij) elements.
nab1 = na*nb+1;
DINTEG = DINTEG_MASTER[mythread];
ofmo_hrr_clear( mythread, La, Lb, Lc, Ld );
for ( ijps=0; ijps<(*nijps); ijps++ ) {
zeta = vzeta[ijps];
dkab = vdkab[ijps];
xiza = vxiza[ijps];
zeta2 = HALF * zeta;
for ( i=0; i<3; i++ ) PA[i] = xiza * BA[i];
// The ket runs over the same primitive-pair list as the bra.
for ( klps=0; klps<(*nijps); klps++ ) {
eta = vzeta[klps];
dk = dkab * vdkab[klps];
xizc = vxiza[klps];
// Because P and Q both lie on segment AB, Q-P = (xizc-xiza)*BA.
dxi = xizc - xiza;
eta2 = HALF * eta;
PQ2 = dxi*dxi*AB2;
for ( i=0; i<3; i++ ) {
QC[i] = xizc* BA[i];
QP[i] = dxi * BA[i];
}
sqrho = sqrt(1.e0/(zeta+eta));
rho = sqrho*sqrho;
rz = rho * zeta;
re = rho * eta;
ze2 = re * zeta2;
for ( i=0; i<3; i++ ) {
WP[i] = rz*QP[i];
WQ[i] = rz*QP[i] - QP[i];
}
T = rho * PQ2;
cssss = sqrho * dk;
ofmo_vrr_calc( mythread, La, Lb, Lc, Ld,
T, cssss, zeta2, eta2, ze2, rz, re, PA, WP, QC, WQ );
ofmo_vrr_cint( mythread, La, Lb, Lc, Ld );
}
}
// DC = BA since the ket shells coincide with the bra shells.
ofmo_hrr_calc( mythread, La, Lb, Lc, Ld, BA, BA );
ofmo_hrr_coef( mythread, La, Lb, Lc, Ld, DINTEG );
// Scan the diagonal elements (ij|ij) for the maximum magnitude.
dmaxint=0.e0;
for ( i=0; i<na; i++ ) {
i0 = i*nb*nab1;
for ( j=0; j<nb; j++ ) {
ij = i0 + j*nab1;
if ( fabs(DINTEG[ij]) > dmaxint ) dmaxint = fabs(DINTEG[ij]);
}
}
return dmaxint;
}
/* Build the Schwarz-screened contracted-shell (CS) pair tables for all
 * pairs of angular type (La, Lb).
 * For every candidate CS pair this generates the screened primitive-pair
 * parameters, evaluates the Schwarz bound max|(ij|ij)| via
 * ofmo_schwarz_core_xxxx, and keeps the pair only when the bound
 * exceeds EPS_CS_PAIR2.  Kept pairs are appended to csp_* (per-pair
 * data) and psp_* (per-primitive-pair data), and the leading-index
 * tables are advanced so that leading_cs_pair[Lab+1] and
 * csp_leading_ps_pair[] describe the new block.
 * csp_schwarz stores sqrt(max|(ij|ij)|) so that a product of two
 * entries bounds |(ij|kl)|.
 * Returns 0 always. */
int ofmo_cutoff_xx(
// input arguments
const int *pLa, const int *pLb, const int leading_cs[],
const int shel_tem[], const int shel_atm[], const int shel_add[],
const double atom_x[], const double atom_y[],
const double atom_z[],
const double prim_exp[], const double prim_coe[],
// output arguments
int leading_cs_pair[],
double csp_schwarz[], int csp_ics[], int csp_jcs[],
int csp_leading_ps_pair[],
double psp_zeta[], double psp_dkps[], double psp_xiza[] ) {
int ics, ics0, ics1, ips0, ips1, iat;
int jcs, jcs0, jcs1, jps0, jps1, jat;
int npps, ipps, ncs_pair, nps_pair;
int i, Lab, jcsmax;
double BA[3], A[3], B[3], AB2;
double max_eri;
double vzeta[MAXNPSPAIR], vdkps[MAXNPSPAIR], vxiza[MAXNPSPAIR];
int La=*pLa, Lb=*pLb, mythread;
mythread = omp_get_thread_num();
// Prepare per-thread VRR/HRR address tables for the (La Lb|La Lb) case.
ofmo_vrr_make_add( mythread, La, Lb, La, Lb, true );
ofmo_hrr_make_add( mythread, La, Lb, La, Lb );
// Composite pair type index; output counters resume where the previous
// pair type left off.
Lab = La*(La+1)/2 + Lb;
ncs_pair = leading_cs_pair[Lab];
nps_pair = csp_leading_ps_pair[ncs_pair];
ics0 = leading_cs[La]; /* first CS of type La */
ics1 = leading_cs[La+1];
jcs0 = leading_cs[Lb]; /* first CS of type Lb */
jcs1 = leading_cs[Lb+1];
for ( ics=ics0; ics<ics1; ics++ ) {
ips0 = shel_add[ics];
ips1 = ips0 + shel_tem[ics];
iat = shel_atm[ics];
A[0]=atom_x[ iat ]; A[1]=atom_y[ iat ]; A[2]=atom_z[ iat ];
// For same-type pairs only keep jcs <= ics to avoid duplicates.
jcsmax = ( Lb==La ? ics+1 : jcs1 );
for ( jcs=jcs0; jcs<jcsmax; jcs++ ) {
jps0 = shel_add[jcs];
jps1 = jps0 + shel_tem[jcs];
jat = shel_atm[jcs];
B[0]=atom_x[ jat ]; B[1]=atom_y[ jat ]; B[2]=atom_z[ jat ];
AB2 = 0.e0;
for ( i=0; i<3; i++ ) {
BA[i] = B[i] - A[i];
AB2 += BA[i]*BA[i];
}
npps = schwarz_calc_ps_pair_params(
prim_exp, prim_coe, ips0, ips1, jps0, jps1, AB2,
vzeta, vdkps, vxiza );
// All primitive pairs screened out -> drop the CS pair entirely.
if ( npps == 0 ) continue;
max_eri = ofmo_schwarz_core_xxxx(
mythread, pLa, pLb,
&npps, vzeta, vdkps, vxiza, BA, AB2 );
if ( max_eri > EPS_CS_PAIR2 ) {
csp_schwarz[ncs_pair] = sqrt( max_eri );
csp_ics[ncs_pair] = ics;
csp_jcs[ncs_pair] = jcs;
// Append this pair's primitive-pair parameters.
for ( ipps=0; ipps<npps; ipps++ ) {
psp_zeta[nps_pair] = vzeta[ipps];
psp_dkps[nps_pair] = vdkps[ipps];
psp_xiza[nps_pair] = vxiza[ipps];
nps_pair++;
}
csp_leading_ps_pair[ncs_pair+1] = nps_pair;
ncs_pair++;
}
} // jcs
} // ics
leading_cs_pair[Lab+1] = ncs_pair;
return 0;
}
// Initialization function (only needs to be called once)
/* One-time initialization of the Obara-Saika integral machinery.
 * Sets up the VRR and HRR tables for angular momenta up to maxlqn and
 * computes the shared prefactor _CK_ = sqrt(2*pi^2*sqrt(pi)).
 * Idempotent: subsequent calls return immediately.
 * Returns 0 always. */
int ofmo_OS_integ_init( const int maxlqn ) {
    static int called = false;
    if ( called ) return 0;
    ofmo_vrr_init( maxlqn );
    ofmo_hrr_init( maxlqn );
    {
        const double pi = 4.e0 * atan( 1.e0 );
        const double t  = 2.e0 * pi * pi * sqrt( pi );
        _CK_ = sqrt( t );
    }
    called = true;
    return 0;
}
// Contracted-integral computation function
/* Compute contracted ERIs for all CS-pair blocks of type (Lab|Lcd)
 * assigned to this worker (round-robin over ijcs with stride nworkers)
 * and append the surviving, symmetry-unique integrals to the ebuf_*
 * arrays (partially-direct SCF path).
 * Screening: a quartet is skipped when the product of the two Schwarz
 * factors val_ab*val_cd falls below eps_ps4; individual integrals below
 * eps_eri are dropped.  Stored integrals carry coefficient factors of
 * 1/2 for each coincident index pair (iao==jao, kao==lao, IJ==KL) so
 * the later Fock build can sum without double counting.
 * On buffer exhaustion returns OFMO_EBUF_FULL with *last_ijcs /
 * *last_klcs recording the resume point; otherwise OFMO_EBUF_NOFULL. */
int ofmo_twoint_xxxx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// for partially direct SCF
const long *pebuf_max_nzeri, long *ebuf_non_zero_eri,
double ebuf_val[], short int ebuf_ind4[],
int *last_ijcs, int *last_klcs ) {
int Lab, Lcd, i, j, k, l, ipat, ix;
int I2, IJ, K2, KL;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1, max_klcs ;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe, coe0;
double *DINTEG;
long nzeri, max_nzeri, nzeri4;
//
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
long ebuf_max_nzeri = *pebuf_max_nzeri;
//
int na, nb, nc, nd;
//
int Labcd, lambda;
//
int mythread;
mythread = omp_get_thread_num();
float eps_eri = ofmo_twoint_eps_eri(0);
float eps_ps4 = ofmo_twoint_eps_ps4(0);
// Per-thread VRR/HRR address tables and integral work buffer.
ofmo_vrr_make_add( mythread, La, Lb, Lc, Ld, true );
ofmo_hrr_make_add( mythread, La, Lb, Lc, Ld );
DINTEG = DINTEG_MASTER[mythread];
na = NNAO[La];
nb = NNAO[Lb];
nc = NNAO[Lc];
nd = NNAO[Ld];
// Composite pair-type indices (triangular numbering).
Lab = La*(La+1)/2+Lb;
Lcd = Lc*(Lc+1)/2+Ld;
// NOTE(review): Labcd and lambda are computed but not used below.
Labcd = Lab*(Lab+1)/2 + Lcd;
lambda = La + Lb + Lc + Ld;
ijcs0 = leading_cs_pair[Lab];
ijcs1 = leading_cs_pair[Lab+1];
klcs0 = leading_cs_pair[Lcd];
klcs1 = leading_cs_pair[Lcd+1];
nzeri = *ebuf_non_zero_eri;
// Reserve room for one full quartet so the inner loops never overflow.
max_nzeri = ebuf_max_nzeri - na*nb*nc*nd;
nzeri4 = nzeri*4;
if ( nzeri >= max_nzeri ) {
*last_ijcs = ijcs0+workerid;
*last_klcs = klcs0 - 1;
*ebuf_non_zero_eri = nzeri;
return OFMO_EBUF_FULL;
}
for ( ijcs=ijcs0+workerid; ijcs<ijcs1; ijcs+=nworkers ) {
val_ab = csp_schwarz[ijcs];
ics = csp_ics[ijcs];
jcs = csp_jcs[ijcs];
ijps0 = csp_leading_ps_pair[ijcs];
nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
iat = shel_atm[ics];
jat = shel_atm[jcs];
iao0 = shel_ini[ics];
jao0 = shel_ini[jcs];
A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
// Same pair type on both sides: only klcs <= ijcs is unique.
max_klcs = ( Lab == Lcd ? ijcs+1 : klcs1 );
for ( klcs=klcs0; klcs<max_klcs; klcs++ ) {
val_cd = csp_schwarz[klcs];
// Schwarz bound: |(ij|kl)| <= val_ab*val_cd.
if ( val_ab*val_cd < eps_ps4 ) continue;
kcs = csp_ics[klcs];
lcs = csp_jcs[klcs];
klps0 = csp_leading_ps_pair[klcs];
nklps = csp_leading_ps_pair[klcs+1]-klps0;
kat = shel_atm[kcs];
lat = shel_atm[lcs];
kao0 = shel_ini[kcs];
lao0 = shel_ini[lcs];
C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
ofmo_twoint_core_xxxx( mythread,
&La, &Lb, &Lc, &Ld,
&nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
&psp_xiza[ijps0], BA,
&nklps, &psp_zeta[klps0], &psp_dkps[klps0],
&psp_xiza[klps0], DC, AC, DINTEG );
/*// debug
#pragma omp master
{
if ( Lab==1 && Lcd==0 && fabs(DINTEG[0]) > 1.e-7 ) {
printf("ijcs, klcs= %4d, %4d E[0]= %10.7f\n",
ijcs, klcs, DINTEG[0] );
fflush(stdout);
}
}*/
// ipat: also store the index-swapped (kl|ij) entry when the bra and
// ket blocks are distinct under the 8-fold permutational symmetry.
ipat = ((Lab != Lcd) || (ics==kcs && jcs>lcs) ? true : false);
#ifdef SORT_CSP
int ijgekl = (ics>kcs);
if (ics==kcs) ijgekl = (jcs>=lcs);
if (!ijgekl) ipat = ( (ics==kcs && jcs<lcs) ? true : false);
#endif
for ( i=0, iao=iao0, ix=0; i<na; i++, iao++ ) {
// I2 = iao*(iao+1)/2, base of the triangular row for iao.
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<nb; j++, jao++ ) {
if ( jao>iao ) { ix+=nc*nd; continue; }
IJ = I2 + jao;
coe0 = ( iao==jao ? HALF : ONE );
for ( k=0, kao=kao0; k<nc; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<nd; l++, lao++, ix++ ) {
if ( lao>kao ) continue;
if ( fabs(DINTEG[ix]) > eps_eri ) {
KL = K2 + lao;
#ifndef SORT_CSP
if ( IJ >= KL ) {
#else
if ((ijgekl&&IJ>=KL) || (!ijgekl&&KL>=IJ)) {
#endif
coe = coe0;
if ( kao==lao ) coe *= HALF;
if ( KL == IJ ) coe *= HALF;
ebuf_val[nzeri] = coe*DINTEG[ix];
ebuf_ind4[nzeri4+0] = (short int)iao;
ebuf_ind4[nzeri4+1] = (short int)jao;
ebuf_ind4[nzeri4+2] = (short int)kao;
ebuf_ind4[nzeri4+3] = (short int)lao;
nzeri++;
nzeri4+=4;
} else if ( ipat ) {
// Store as (kl|ij) with the roles of the index pairs swapped.
coe = coe0;
if ( kao==lao ) coe*=HALF;
ebuf_val[nzeri] = coe*DINTEG[ix];
ebuf_ind4[nzeri4+0] = (short int)kao;
ebuf_ind4[nzeri4+1] = (short int)lao;
ebuf_ind4[nzeri4+2] = (short int)iao;
ebuf_ind4[nzeri4+3] = (short int)jao;
nzeri++;
nzeri4+=4;
}
}
} // l
} // k
} // j
} // i
if ( nzeri >= max_nzeri ) {
*last_ijcs = ijcs;
*last_klcs = klcs;
*ebuf_non_zero_eri = nzeri;
return OFMO_EBUF_FULL;
}
} // for ( klcs );
} // for ( ijcs );
*ebuf_non_zero_eri = nzeri;
return OFMO_EBUF_NOFULL;
}
//
// Contracted-integral computation function (direct SCF variant)
/* Direct-SCF counterpart of ofmo_twoint_xxxx: compute the remaining
 * contracted ERIs of type (Lab|Lcd) for this worker and fold them
 * directly into the G matrix via ofmo_integ_add_fock whenever the
 * temporary buffer etmp_* fills up.
 * Resumes from (*plast_ijcs, *plast_klcs) when the buffered pass
 * (ofmo_twoint_xxxx) stopped early; *plast_ijcs == -1 means start from
 * the beginning of the worker's range.
 * Adds a third screen on top of Schwarz: quartets whose bound times the
 * density-matrix maximum (ofmo_twoint_dmax6) is below eps_sch are
 * skipped.  The symmetry/coefficient bookkeeping is identical to
 * ofmo_twoint_xxxx.  Returns 0. */
int ofmo_twoint_direct_xxxx(
const int *pnworkers, const int *pworkerid,
const int *pLa, const int *pLb, const int *pLc, const int *pLd,
const int shel_atm[], const int shel_ini[],
const double atom_x[], const double atom_y[],
const double atom_z[], const int leading_cs_pair[],
const double csp_schwarz[],
const int csp_ics[], const int csp_jcs[],
const int csp_leading_ps_pair[],
const double psp_zeta[], const double psp_dkps[],
const double psp_xiza[],
// for direct SCF
const long *petmp_max_nzeri, long *petmp_non_zero_eri,
double etmp_val[], short int etmp_ind4[],
const int *plast_ijcs, const int *plast_klcs,
// density matrix & G-matrix data
const int *pnao, const double Ds[], double G[] ) {
int nworkers=*pnworkers, workerid=*pworkerid;
int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
long max_nzeri=*petmp_max_nzeri;
long nzeri4, nzeri=*petmp_non_zero_eri;
//
int Lab, Lcd, i, j, k, l, ipat, ix;
int I2, IJ, K2, KL;
int ijcs, ijcs0, ijcs1;
int klcs, klcs0, klcs1, max_klcs ;
int ijps0, nijps, klps0, nklps;
int ics, iat, iao, iao0, jcs, jat, jao, jao0;
int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
double val_ab, val_cd, coe, coe0;
double *DINTEG;
//
int na, nb, nc, nd;
//
int mythread;
float eps_eri = ofmo_twoint_eps_eri(0);
float eps_ps4 = ofmo_twoint_eps_ps4(0);
float eps_sch = ofmo_twoint_eps_sch(0);
mythread = omp_get_thread_num();
// Per-thread VRR/HRR address tables and integral work buffer.
ofmo_vrr_make_add( mythread, La, Lb, Lc, Ld, true );
ofmo_hrr_make_add( mythread, La, Lb, Lc, Ld );
DINTEG = DINTEG_MASTER[mythread];
na = NNAO[La];
nb = NNAO[Lb];
nc = NNAO[Lc];
nd = NNAO[Ld];
Lab = La*(La+1)/2+Lb;
Lcd = Lc*(Lc+1)/2+Ld;
ijcs1 = leading_cs_pair[Lab+1];
klcs0 = leading_cs_pair[Lcd];
klcs1 = leading_cs_pair[Lcd+1];
// Resume where the buffered pass left off, or start fresh.
if ( last_ijcs != -1 ) {
ijcs = last_ijcs;
klcs = last_klcs+1;
} else {
ijcs = leading_cs_pair[Lab] + workerid;
klcs = klcs0;
}
// Reserve room for one full quartet per flush check.
max_nzeri -= na*nb*nc*nd;
nzeri4 = nzeri*4;
if ( nzeri >= max_nzeri ) {
// Buffer already full: fold buffered integrals into G and reset.
ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
nzeri = nzeri4 = 0;
}
for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
val_ab = csp_schwarz[ijcs];
ics = csp_ics[ijcs];
jcs = csp_jcs[ijcs];
ijps0 = csp_leading_ps_pair[ijcs];
nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
iat = shel_atm[ics];
jat = shel_atm[jcs];
iao0 = shel_ini[ics];
jao0 = shel_ini[jcs];
A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
max_klcs = ( Lab == Lcd ? ijcs+1 : klcs1 );
for ( ; klcs<max_klcs; klcs++ ) {
val_cd = csp_schwarz[klcs];
// Schwarz screen.
if ( val_ab*val_cd < eps_ps4 ) continue;
kcs = csp_ics[klcs];
lcs = csp_jcs[klcs];
// Density-weighted screen (direct SCF only).
if ( val_ab*val_cd*ofmo_twoint_dmax6(ics,jcs,kcs,lcs) < eps_sch ) continue;
klps0 = csp_leading_ps_pair[klcs];
nklps = csp_leading_ps_pair[klcs+1]-klps0;
kat = shel_atm[kcs];
lat = shel_atm[lcs];
kao0 = shel_ini[kcs];
lao0 = shel_ini[lcs];
C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
for ( i=0; i<3; i++ ) {
AC[i] = A[i] - C[i];
DC[i] = D[i] - C[i];
}
ofmo_twoint_core_xxxx( mythread,
&La, &Lb, &Lc, &Ld,
&nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
&psp_xiza[ijps0], BA,
&nklps, &psp_zeta[klps0], &psp_dkps[klps0],
&psp_xiza[klps0], DC, AC, DINTEG );
// ipat: also store the index-swapped (kl|ij) entry when unique.
ipat = ((Lab != Lcd) || (ics==kcs && jcs>lcs) ? true : false);
#ifdef SORT_CSP
int ijgekl = (ics>kcs);
if (ics==kcs) ijgekl = (jcs>=lcs);
if (!ijgekl) ipat = ( (ics==kcs && jcs<lcs) ? true : false);
#endif
for ( i=0, iao=iao0, ix=0; i<na; i++, iao++ ) {
I2 = (iao*iao+iao)>>1;
for ( j=0, jao=jao0; j<nb; j++, jao++ ) {
if ( jao>iao ) { ix+=nc*nd; continue; }
IJ = I2 + jao;
coe0 = ( iao==jao ? HALF : ONE );
for ( k=0, kao=kao0; k<nc; k++, kao++ ) {
K2 = (kao*kao+kao)>>1;
for ( l=0, lao=lao0; l<nd; l++, lao++, ix++ ) {
if ( lao>kao ) continue;
if ( fabs(DINTEG[ix]) > eps_eri ) {
KL = K2 + lao;
#ifndef SORT_CSP
if ( IJ >= KL ) {
#else
if ((ijgekl&&IJ>=KL) || (!ijgekl&&KL>=IJ)) {
#endif
coe = coe0;
if ( kao==lao ) coe *= HALF;
if ( KL == IJ ) coe *= HALF;
etmp_val[nzeri] = coe*DINTEG[ix];
etmp_ind4[nzeri4+0] = (short int)iao;
etmp_ind4[nzeri4+1] = (short int)jao;
etmp_ind4[nzeri4+2] = (short int)kao;
etmp_ind4[nzeri4+3] = (short int)lao;
nzeri++;
nzeri4+=4;
} else if ( ipat ) {
coe = coe0;
if ( kao==lao ) coe*=HALF;
etmp_val[nzeri] = coe*DINTEG[ix];
etmp_ind4[nzeri4+0] = (short int)kao;
etmp_ind4[nzeri4+1] = (short int)lao;
etmp_ind4[nzeri4+2] = (short int)iao;
etmp_ind4[nzeri4+3] = (short int)jao;
nzeri++;
nzeri4+=4;
}
}
} // l
} // k
} // j
} // i
if ( nzeri >= max_nzeri ) {
// Flush the temporary integral buffer into the G matrix.
ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
Ds, G );
nzeri = nzeri4= 0;
}
} // for ( klcs );
// Subsequent ijcs iterations scan the full klcs range again.
klcs = klcs0;
} // for ( ijcs );
*petmp_non_zero_eri = nzeri;
return 0;
}
|
mpi-openmp.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#warning Your compiler does not support OpenMP, at least with the flags you're using.
#endif
#include <mpi.h>
#define MPI_THREAD_STRING(level) \
( level==MPI_THREAD_SERIALIZED ? "THREAD_SERIALIZED" : \
( level==MPI_THREAD_MULTIPLE ? "THREAD_MULTIPLE" : \
( level==MPI_THREAD_FUNNELED ? "THREAD_FUNNELED" : \
( level==MPI_THREAD_SINGLE ? "THREAD_SINGLE" : "THIS_IS_IMPOSSIBLE" ) ) ) )
int main(int argc, char ** argv)
{
    /* These are the desired and available thread support.
       A hybrid code where all MPI calls are made from the main thread can use FUNNELED.
       If threads are making MPI calls, MULTIPLE is appropriate. */
    /* BUG FIX: the assignments below had been corrupted to
       "int requested # MPI_THREAD_FUNNELED" (a stray '#' instead of '='),
       which does not compile; restored to proper initializers. */
    int requested = MPI_THREAD_FUNNELED, provided;
    /* MPICH2 will be substantially more efficient than OpenMPI
       for MPI_THREAD_{FUNNELED,SERIALIZED} but this is unlikely
       to be a serious bottleneck. */
    MPI_Init_thread(&argc, &argv, requested, &provided);
    if (provided < requested)
    {
        printf("MPI_Init_thread provided %s when %s was requested. Exiting. \n",
               MPI_THREAD_STRING(provided), MPI_THREAD_STRING(requested) );
        exit(1);
    }
    int world_size, world_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    printf("Hello from %d of %d processors\n", world_rank, world_size);
#ifdef _OPENMP
#pragma omp parallel
    {
        /* Same corruption fix: '=' restored in the two declarations below. */
        int omp_id = omp_get_thread_num();
        int omp_num = omp_get_num_threads();
        printf("MPI rank # %2d OpenMP thread # %2d of %2d \n", world_rank, omp_id, omp_num);
        fflush(stdout);
    }
#else
    printf("MPI rank # %2d \n", world_rank);
    fflush(stdout);
#endif
    MPI_Finalize();
    return 0;
}
|
laplace_par.h | #ifndef _LAPLACE_PAR_
#define _LAPLACE_PAR_
#include<omp.h>
template<int SIZE>
inline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2])
{
    // Zero both grids, including the one-cell halo on every side.
    const int N = SIZE + 2;
#pragma omp parallel for
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
        {
            a[row][col] = 0.0;
            b[row][col] = 0.0;
        }
    }
}
template<int SIZE>
inline void time_step(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n)
{
    // One Jacobi relaxation sweep over the interior.  The two grids
    // alternate roles: even steps read a and write b, odd steps the reverse.
    double (*src)[SIZE + 2] = (n % 2 == 0) ? a : b;
    double (*dst)[SIZE + 2] = (n % 2 == 0) ? b : a;
#pragma omp parallel for
    for (int i = 1; i < SIZE + 1; i++)
    {
        for (int j = 1; j < SIZE + 1; j++)
        {
            dst[i][j] = (src[i + 1][j] + src[i - 1][j]
                       + src[i][j - 1] + src[i][j + 1]) * 0.25;
        }
    }
}
#endif // !_LAPLACE_PAR_
|
permute.c | // Permutations that enable SSE and AVX vectorization.
#include "fasttransforms.h"
// Gather A into B so that, within each group of L columns, elements are
// interleaved with stride L — the layout SSE/AVX loads expect.
void permute(const double * A, double * B, const int N, const int M, const int L) {
    #pragma omp parallel for if (N < 2*M)
    for (int j = 0; j < M; j += L) {
        const int LN = L*N;               // elements in one L-column group
        const double * src = A + j*N;
        double * dst = B + j*N;
        for (int k = 0; k < LN; k++)
            dst[(L*k)%LN + (L*k)/LN] = src[k];
    }
}
// Inverse of permute: scatter the interleaved layout in B back into the
// contiguous column layout of A.
void permute_t(double * A, const double * B, const int N, const int M, const int L) {
    #pragma omp parallel for if (N < 2*M)
    for (int j = 0; j < M; j += L) {
        const int LN = L*N;
        const double * src = B + j*N;
        double * dst = A + j*N;
        for (int k = 0; k < LN; k++)
            dst[k] = src[(L*k)%LN + (L*k)/LN];
    }
}
// Spherical-harmonic variant: the first column stands alone, the rest are
// permuted in groups; larger L recurses on the leading remainder block.
void permute_sph(const double * A, double * B, const int N, const int M, const int L) {
    if (L != 2) {
        const int head = M % (2*L);
        permute_sph(A, B, N, head, L/2);
        permute(A + head*N, B + head*N, N, M - head, L);
        return;
    }
    for (int k = 0; k < N; k++)
        B[k] = A[k];
    permute(A + N, B + N, N, M - 1, 2);
}
// Inverse of permute_sph: undo the grouped permutation, copying the
// stand-alone first column directly.
void permute_t_sph(double * A, const double * B, const int N, const int M, const int L) {
    if (L != 2) {
        const int head = M % (2*L);
        permute_t_sph(A, B, N, head, L/2);
        permute_t(A + head*N, B + head*N, N, M - head, L);
        return;
    }
    for (int k = 0; k < N; k++)
        A[k] = B[k];
    permute_t(A + N, B + N, N, M - 1, 2);
}
// Triangular-harmonic variant: no stand-alone first column; recurse on the
// leading remainder block for L > 2.
void permute_tri(const double * A, double * B, const int N, const int M, const int L) {
    if (L != 2) {
        const int head = M % (2*L);
        permute_tri(A, B, N, head, L/2);
        permute(A + head*N, B + head*N, N, M - head, L);
        return;
    }
    permute(A, B, N, M, 2);
}
// Inverse of permute_tri.
void permute_t_tri(double * A, const double * B, const int N, const int M, const int L) {
    if (L != 2) {
        const int head = M % (2*L);
        permute_t_tri(A, B, N, head, L/2);
        permute_t(A + head*N, B + head*N, N, M - head, L);
        return;
    }
    permute_t(A, B, N, M, 2);
}
// Exchange the first N elements of A and B element-by-element.
void swap(double * A, double * B, const int N) {
    for (int i = 0; i < N; i++) {
        const double t = B[i];
        B[i] = A[i];
        A[i] = t;
    }
}
// Forward butterfly column reordering: at each doubling level w, exchange
// the second and third w-wide slices of every 4w-column group.
void warp(double * A, const int N, const int M, const int L) {
    const int start = M % (4*L);
    for (int w = 2; w <= L; w <<= 1) {
        #pragma omp parallel for
        for (int g = start; g < M; g += 4*w)
            swap(A + (g + w)*N, A + (g + 2*w)*N, w*N);
    }
}
// Inverse butterfly: apply the same pairwise exchanges as warp but from
// the largest level down, undoing the reordering.
void warp_t(double * A, const int N, const int M, const int L) {
    const int start = M % (4*L);
    for (int w = L; w >= 2; w >>= 1) {
        #pragma omp parallel for
        for (int g = start; g < M; g += 4*w)
            swap(A + (g + w)*N, A + (g + 2*w)*N, w*N);
    }
}
|
DecisionFunctionGenerator.h | /**
* rafl: DecisionFunctionGenerator.h
* Copyright (c) Torr Vision Group, University of Oxford, 2015. All rights reserved.
*/
#ifndef H_RAFL_DECISIONFUNCTIONGENERATOR
#define H_RAFL_DECISIONFUNCTIONGENERATOR
#include <utility>
#ifdef WITH_OPENMP
#include <omp.h>
#endif
#include "../examples/ExampleReservoir.h"
#include "../examples/ExampleUtil.h"
#include "DecisionFunction.h"
namespace rafl {
/**
* \brief An instance of an instantiation of a class template deriving from this one can be used to generate a decision function
* with which to split a set of examples.
*/
template <typename Label>
class DecisionFunctionGenerator
{
//#################### PROTECTED TYPEDEFS ####################
protected:
typedef boost::shared_ptr<const Example<Label> > Example_CPtr;
//#################### NESTED TYPES ####################
public:
/**
* \brief An instance of this struct represents a split of a set of examples into two subsets,
* based on their classification against a decision function.
*/
struct Split
{
/** The decision function that induced the split. */
DecisionFunction_Ptr m_decisionFunction;
/** The examples that were sent left by the decision function. */
std::vector<Example_CPtr> m_leftExamples;
/** The examples that were sent right by the decision function. */
std::vector<Example_CPtr> m_rightExamples;
};
//#################### PUBLIC TYPEDEFS ####################
public:
typedef boost::shared_ptr<Split> Split_Ptr;
typedef boost::shared_ptr<const Split> Split_CPtr;
//#################### PRIVATE VARIABLES ####################
private:
/* The split candidates. Cached as a member (mutable) so the vectors can be
reused across calls to split_examples without reallocation.
NOTE(review): this makes concurrent calls to split_examples on the SAME
generator instance unsafe — confirm callers serialize such calls. */
mutable std::vector<Split> m_splitCandidates;
//#################### DESTRUCTOR ####################
public:
/**
* \brief Destroys the generator.
*/
virtual ~DecisionFunctionGenerator() {}
//#################### PUBLIC ABSTRACT MEMBER FUNCTIONS ####################
public:
/**
* \brief Generates a candidate decision function to split the specified set of examples.
*
* \param examples The examples to split.
* \param randomNumberGenerator A random number generator.
* \return The candidate decision function.
*/
virtual DecisionFunction_Ptr generate_candidate_decision_function(const std::vector<Example_CPtr>& examples, const tvgutil::RandomNumberGenerator_Ptr& randomNumberGenerator) const = 0;
/**
* \brief Gets the parameters of the decision function generator as a string.
*
* \return The parameters of the decision function generator as a string.
*/
virtual std::string get_params() const = 0;
/**
* \brief Gets the type of the decision function generator.
*
* \return The type of the decision function generator.
*/
virtual std::string get_type() const = 0;
//#################### PUBLIC MEMBER FUNCTIONS ####################
public:
/**
* \brief Tries to pick an appropriate way in which to split the specified reservoir of examples.
*
* Generates candidateCount random decision functions, partitions the reservoir's
* examples with each (in parallel when OpenMP is available), and returns a copy
* of the candidate with the highest information gain above gainThreshold that
* leaves both sides non-empty.
*
* \param reservoir The reservoir of examples to split.
* \param candidateCount The number of candidates to evaluate.
* \param gainThreshold The minimum information gain that must be obtained from a split to make it worthwhile.
* \param inverseClassWeights The (optional) inverses of the L1-normalised class frequencies observed in the training data.
* \param randomNumberGenerator A random number generator.
* \return The chosen split, if one was suitable, or NULL otherwise.
*/
Split_CPtr split_examples(const ExampleReservoir<Label>& reservoir, int candidateCount, float gainThreshold, const boost::optional<std::map<Label,float> >& inverseClassWeights,
const tvgutil::RandomNumberGenerator_Ptr& randomNumberGenerator) const
{
std::vector<Example_CPtr> examples = reservoir.get_examples();
float initialEntropy = ExampleUtil::calculate_entropy(*reservoir.get_histogram(), inverseClassWeights);
#if 0
std::cout << "\nP: " << *reservoir.get_histogram() << ' ' << initialEntropy << '\n';
#endif
// Generate the split candidates (resizing the cache only when the count changes).
if(static_cast<int>(m_splitCandidates.size()) != candidateCount) m_splitCandidates.resize(candidateCount);
for(int i = 0; i < candidateCount; ++i)
{
m_splitCandidates[i].m_decisionFunction = generate_candidate_decision_function(examples, randomNumberGenerator);
}
// Pick the best split candidate and return it.
float bestGain = static_cast<float>(INT_MIN);
int bestIndex = -1;
#ifdef WITH_OPENMP
#pragma omp parallel for
#endif
for(int i = 0; i < candidateCount; ++i)
{
#if 0 && WITH_OPENMP
int threadID = omp_get_thread_num();
int threadCount = omp_get_num_threads();
std::cout << "threadID=" << threadID << " threadCount=" << threadCount << '\n';
#endif
#if 0
// NOTE(review): stale debug code — 'splitCandidate' is not declared in this scope.
std::cout << *splitCandidate->m_decisionFunction << '\n';
#endif
// Partition the examples using the split candidate's decision function.
// Each iteration touches only its own m_splitCandidates[i], so the
// parallel loop body is race-free up to the critical section below.
m_splitCandidates[i].m_leftExamples.clear();
m_splitCandidates[i].m_rightExamples.clear();
for(size_t j = 0, size = examples.size(); j < size; ++j)
{
if(m_splitCandidates[i].m_decisionFunction->classify_descriptor(*examples[j]->get_descriptor()) == DecisionFunction::DC_LEFT)
{
m_splitCandidates[i].m_leftExamples.push_back(examples[j]);
}
else
{
m_splitCandidates[i].m_rightExamples.push_back(examples[j]);
}
}
// Calculate the information gain we would obtain from this split.
float gain = calculate_information_gain(reservoir, initialEntropy, m_splitCandidates[i].m_leftExamples, m_splitCandidates[i].m_rightExamples, inverseClassWeights);
// The critical section serialises updates to the shared best-candidate state.
#ifdef WITH_OPENMP
#pragma omp critical
#endif
{
if(gain > bestGain)
{
if(gain > gainThreshold && !m_splitCandidates[i].m_leftExamples.empty() && !m_splitCandidates[i].m_rightExamples.empty())
{
bestGain = gain;
bestIndex = i;
}
}
}
}
Split_Ptr bestSplitCandidate;
if(bestIndex != -1) bestSplitCandidate.reset(new Split(m_splitCandidates[bestIndex]));
// Return a split candidate that had maximum gain (note that this may be NULL if no split had a high enough gain).
return bestSplitCandidate;
}
//#################### PRIVATE STATIC MEMBER FUNCTIONS ####################
private:
/**
* \brief Calculates the information gain that results from splitting an example reservoir in a particular way.
*
* Gain = H(parent) - (|L|/|P|)*H(L) - (|R|/|P|)*H(R), with entropies weighted
* by the reservoir's class multipliers (optionally combined with the inverse
* class weights).
*
* \param reservoir The reservoir.
* \param initialEntropy The entropy of the example set before the split.
* \param leftExamples The examples that end up in the left half of the split.
* \param rightExamples The examples that end up in the right half of the split.
* \param inverseClassWeights The (optional) inverses of the L1-normalised class frequencies observed in the training data.
* \return The information gain resulting from the split.
*/
static float calculate_information_gain(const ExampleReservoir<Label>& reservoir, float initialEntropy, const std::vector<Example_CPtr>& leftExamples, const std::vector<Example_CPtr>& rightExamples, const boost::optional<std::map<Label,float> >& inverseClassWeights)
{
float exampleCount = static_cast<float>(reservoir.current_size());
std::map<Label,float> multipliers = reservoir.get_class_multipliers();
if(inverseClassWeights) multipliers = combine_multipliers(multipliers, *inverseClassWeights);
float leftEntropy = ExampleUtil::calculate_entropy(leftExamples, multipliers);
float rightEntropy = ExampleUtil::calculate_entropy(rightExamples, multipliers);
float leftWeight = leftExamples.size() / exampleCount;
float rightWeight = rightExamples.size() / exampleCount;
#if 0
std::cout << "L: " << ExampleUtil::make_histogram(leftExamples) << ' ' << leftEntropy << '\n';
std::cout << "R: " << ExampleUtil::make_histogram(rightExamples) << ' ' << rightEntropy << '\n';
#endif
float gain = initialEntropy - (leftWeight * leftEntropy + rightWeight * rightEntropy);
#if 0
std::cout << "Gain: " << gain << '\n';
#endif
return gain;
}
/**
* \brief Multiplies together two sets of multipliers that share some labels in common.
*
* Multipliers that only appear in one of the two input sets will not be included in the result,
* e.g. combine_multipliers({a => 0.1, b => 0.2}, {b => 0.5, c => 0.3}) = {b => 0.2 * 0.5 = 0.1}.
*
* \param multipliers1 The first set of multipliers.
* \param multipliers2 The second set of multipliers.
* \return The combined multipliers.
*/
static std::map<Label,float> combine_multipliers(const std::map<Label,float>& multipliers1, const std::map<Label,float>& multipliers2)
{
std::map<Label,float> result;
// Linear merge over the two sorted maps, keeping only common keys.
typename std::map<Label,float>::const_iterator it = multipliers1.begin(), iend = multipliers1.end(), jt = multipliers2.begin(), jend = multipliers2.end();
while(it != iend && jt != jend)
{
if(it->first == jt->first)
{
result.insert(std::make_pair(it->first, it->second * jt->second));
++it, ++jt;
}
else if(it->first < jt->first) ++it;
else ++jt;
}
return result;
}
};
}
#endif
|
spot.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pix.h"
/*-------------------------------------------------------------------------
*
* Used to sort spots in descent of frame IDs.
*
*------------------------------------------------------------------------*/
int spot_cmp(const void *a, const void *b) {
sp_t *aa = *((sp_t **)a);
sp_t *bb = *((sp_t **)b);
if (aa != NULL && bb != NULL) {
if (aa->fID > bb->fID)
return -1;
else if (aa->fID < bb->fID)
return 1;
else
return 0;
} else {
if (aa == NULL && bb == NULL)
return 0;
else if (aa == NULL)
return 1;
else
return -1;
}
}
/*-------------------------------------------------------------------------
 *
 * Spot finding: from a set of successive frames.
 *
 * Walks the frame range [p->frameID1, p->frameID2] backwards in time.
 * Each frame (fm0) is compared against the frame that follows it (fm1);
 * the roles then rotate, so every frame is loaded exactly once.
 *
 *------------------------------------------------------------------------*/
void spot_dframe(para_t *p) {
    frameIO_t *fio;
    frameloc_t *fm0 = NULL, *fm1 = NULL;
    int fID, fID0, fIDN, twoperc, n;

    fID0 = p->frameID1;
    fIDN = p->frameID2;
    fio = frameIO_Open(p);
    twoperc = (fIDN - fID0 + 1) / 50;   /* one progress dot per ~2% of frames */
    printf("Frames for analysis: (%d,%d)\n", fID0, fIDN);

    fm1 = frameIO(p, fio, fIDN); // load the *next* frame
    n = 0;
    for (fID = fIDN - 1; fID >= fID0; fID--) {
        fm0 = frameIO(p, fio, fID); // load the *this* frame
        /* Dispatch to the spot-finding algorithm selected by p->alg. */
        if (p->alg == 0)
            frameSpots(p, fm0, fm1);
        else
            frameSpot2(p, fm0, fm1);
        frameDelete(fm1);       /* fm1 is no longer needed after the comparison */
        frame_sum(p, fm0);
        fm1 = fm0;              /* current frame becomes the "next" frame for the following iteration */
        n++;
        if (twoperc > 0 && n % twoperc == 0) {
            fprintf(stderr, ".");   /* progress indicator */
            fflush(stderr);
        }
    }
    fprintf(stderr, "\n");
    frameIO_Close(p, fio);
    frameDelete(fm1);           /* release the last remaining frame */
}
/*-------------------------------------------------------------------------
 *
 * Spot finding: from a single frame.
 *
 * Loads frame p->frameID2, runs the selected spot-finding algorithm on it
 * (with no "next" frame to compare against), accumulates the pixel sum,
 * and releases all resources.
 *
 *------------------------------------------------------------------------*/
void spot_sframe(para_t *p) {
    frameIO_t *fio;
    frameloc_t *fm = NULL;

    fio = frameIO_Open(p);
    fm = frameIO(p, fio, p->frameID2);
    /* Dispatch to the spot-finding algorithm selected by p->alg. */
    if (p->alg == 0)
        frameSpots(p, fm, NULL);
    else
        frameSpot2(p, fm, NULL);
    frame_sum(p, fm);
    frameDelete(fm);
    frameIO_Close(p, fio);  /* FIX: was missing -- the frame I/O handle was leaked
                               (spot_dframe closes it; this path did not) */
}
/*-------------------------------------------------------------------------
 *
 * Spot handling: fitting for all the candidate spots.
 *
 * Fits every candidate in p->sp1 (normal spots, with a progress bar) and
 * p->sp2 (high-intensity spots), then writes the fit results, per-frame
 * statistics and pixel sums to the output files.
 *
 *------------------------------------------------------------------------*/
void spot_fitting(para_t *p) {
    FILE *f;
    double *x_fit, *y_fit, dt;
    sp_t **sp;
    int i, x, y, r, n, twoperc;
    int sdim_x, sdim_y, x_rng, y_rng, imglen;

    // Prepare to show the progress of running.
    printf("Found candidate spots: %d\n", p->n_sp1);
    fflush(stdout);
    twoperc = p->n_sp1 / 50;    /* one progress dot per ~2% of the spots */

    // Prepare coordinates of the pixels of a spot, relative to its center.
    // NOTE(review): the fill below writes exactly imglen entries only when
    // both x_find_pixels and y_find_pixels are odd -- confirm with callers.
    sdim_x = p->x_find_pixels;
    sdim_y = p->y_find_pixels;
    imglen = sdim_x * sdim_y;
    x_rng = sdim_x / 2;
    y_rng = sdim_y / 2;
    x_fit = malloc(imglen * sizeof(double));
    y_fit = malloc(imglen * sizeof(double));
    if (!x_fit || !y_fit)
        pstop("!!! SpotFit: not enough memory.\n");
    i = 0;
    for (y = -y_rng; y <= y_rng; y++) {
        for (x = -x_rng; x <= x_rng; x++) {
            x_fit[i] = x;
            y_fit[i] = y;
            i++;
        }
    }

    // Fitting for the normal spots.
    sp = p->sp1;
    n = 0;
    for (i = 0; i < p->n_sp1; i++) {
        r = SpotFit(p, x_fit, y_fit, sp[i]);
        if (r == 0) n++;        /* r == 0 means the fit succeeded */
        if (twoperc > 0 && i % twoperc == 0) {
            fprintf(stderr, ".");
            fflush(stderr);
        }
    }
    fprintf(stderr, "\n");
    printf("Total valid particles: %d\n", n);

    // Fitting for the high-intensity spots.
    sp = p->sp2;
    n = 0;
    for (i = 0; i < p->n_sp2; i++) {
        r = SpotFit(p, x_fit, y_fit, sp[i]);
        if (r == 0) n++;
    }
    printf("Total high-intensity particles: %d\n", n);

    // FIX: the coordinate buffers were previously leaked.
    free(x_fit);
    free(y_fit);

    // Output the results of normal spots, and update the statistics.
    f = out_fit_init(p, p->outfn);
    sp = p->sp1;
    for (i = 0, n = 0; i < p->n_sp1; i++) {
        if (sp[i] && sp[i]->res) {
            out_fit(p, f, n, sp[i]);
            x = sp[i]->fID - p->frameID1;   /* per-frame statistics index */
            p->fsts[x].n_event++;
            n++;
        }
    }
    dt = get_realtime();
    fprintf(f, "ExecTime: %E sec\n", dt);
    printf("ExecTime: %E sec\n", dt);
    out_fit_close(f);

    // Output the results of high-intensity spots.
    f = out_fit_init(p, p->outfnH);
    sp = p->sp2;
    for (i = 0, n = 0; i < p->n_sp2; i++) {
        if (sp[i] && sp[i]->res) {
            out_fit(p, f, n, sp[i]);
            n++;
        }
    }
    out_fit_close(f);

    // Output the statistics and the sum of pixels.
    out_framests(p);
    out_framesum(p);
}
/*-------------------------------------------------------------------------
 *
 * Spot handling: output the images of all candidate spots.
 *
 * Writes the raw pixel data of every non-NULL candidate spot to "spot.img"
 * as consecutive float blocks of x_find_pixels * y_find_pixels values.
 *
 *------------------------------------------------------------------------*/
void spot_output_img(para_t *p) {
    FILE *f;
    float *buf;
    int i, j, count, imglen, *img;
    const char *fn = "spot.img";

    imglen = p->x_find_pixels * p->y_find_pixels;
    f = fopen(fn, "wb");
    if (f == NULL)
        pstop("!!! spot_output_img: cannot open file: %s\n", fn);
    buf = malloc(imglen * sizeof(float));
    if (buf == NULL)
        pstop("!!! spot_output_img: not enough memory.\n");

    count = 0;
    for (i = 0; i < p->n_sp1; i++) {
        if (p->sp1[i] != NULL && p->sp1[i]->img != NULL) {
            /* Convert the integer pixels to float and append them to the file. */
            img = p->sp1[i]->img;
            for (j = 0; j < imglen; j++)
                buf[j] = (float)img[j];
            fwrite(buf, sizeof(float), imglen, f);
            count++;
        }
    }
    printf("Number of found spots: %d\n", count);
    fclose(f);
    free(buf);
}
|
GB_binop__max_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__max_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__max_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__max_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int64)
// A*D function (colscale): GB (_AxD__max_int64)
// D*A function (rowscale): GB (_DxB__max_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__max_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__max_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int64)
// C=scalar+B GB (_bind1st__max_int64)
// C=scalar+B' GB (_bind1st_tran__max_int64)
// C=A+scalar GB (_bind2nd__max_int64)
// C=A'+scalar GB (_bind2nd_tran__max_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT64 || GxB_NO_MAX_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE: auto-generated kernel (do not edit).  The loop body lives in the
// included template, specialized via the GB_* macros above (z = GB_IMAX(x,y)).
void GB (_Cdense_ewise3_accum__max_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit).  Returns GrB_NO_VALUE when this
// operator/type combination is disabled at compile time (see GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__max_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit); the accumulation loop lives in the
// included template, specialized for the MAX_INT64 operator via the GB_*
// macros defined above.
GrB_Info GB (_Cdense_accumB__max_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel.  The scalar b is unpacked from p_bwork and the
// accumulation loop lives in the included template.
GrB_Info GB (_Cdense_accumb__max_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// FIX: removed an unreachable duplicate "return (GrB_SUCCESS) ;" that
// followed the block above -- the inner return always executes first.
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit); the scaling loop lives in the
// included template, writing into Cx with the MAX_INT64 operator.
GrB_Info GB (_AxD__max_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit); the scaling loop lives in the
// included template, writing into Cx with the MAX_INT64 operator.
GrB_Info GB (_DxB__max_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit).  The add logic lives in the included
// template; the ek_slicing workspaces declared here are released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__max_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit); the element-wise multiply logic lives
// in the included meta-template, specialized via the GB_* macros above.
GrB_Info GB (_AemultB_08__max_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit).  GB_BINOP_FLIP is 0 for this operator
// (see its definition above), so the unflipped #else branch below is the one
// that gets compiled; flipxy is effectively ignored.
GrB_Info GB (_AemultB_02__max_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit); the loop lives in the included
// template, specialized via the GB_* macros above.
GrB_Info GB (_AemultB_04__max_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit); the bitmap element-wise multiply
// lives in the included template, specialized via the GB_* macros above.
GrB_Info GB (_AemultB_bitmap__max_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit).  For every entry present in the
// bitmap Bb (GBB true), computes Cx [p] = GB_IMAX (x, Bx [p]) in parallel.
GrB_Info GB (_bind1st__max_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries that are not present in the bitmap
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit).  For every entry present in the
// bitmap Ab (GBB true), computes Cx [p] = GB_IMAX (Ax [p], y) in parallel.
GrB_Info GB (_bind2nd__max_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries that are not present in the bitmap
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// Auto-generated kernel (do not edit); the transpose loop lives in
// GB_unop_transpose.c and applies GB_CAST_OP to each entry.
GrB_Info GB (_bind1st_tran__max_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code compiled after this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// Auto-generated kernel (do not edit); the transpose loop lives in
// GB_unop_transpose.c and applies GB_CAST_OP to each entry.
GrB_Info GB (_bind2nd_tran__max_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
chkompt.c | // RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <omp-tools.h>
#include "chkompt.h"
int nfails = 0;
void (*skew_delay_ptr)(int) = skew_delay;
void (*delay_ptr)(int) = delay;
// ------------------------------------------------------------------------
// Main program -- drive various tests
//
// Runs each OMPT test in sequence, separated by CPU-burn delays, then
// reports the accumulated failure count and exits with a matching status.
// ------------------------------------------------------------------------
int main(int argc, char **argv)
{
    char buf[256];

    starttime = gethrtime();
    // fprintf(stderr, "main invoked\n");

    // Set thread count; causes the initialization of the OMPT code
    omp_set_num_threads(NUMTHREADS);

    // look up the ompt_get_task_info address
    ompt_get_task_info_fn = (ompt_get_task_info_t) my_lookup("ompt_get_task_info");

    // test lock callbacks
    lockcbtest();
    (*delay_ptr)(10);

    // test reduction
    reductiontest();
    (*delay_ptr)(10);

    // Test frames for multiple loops in a single parallel region
    testparallel();
    (*delay_ptr)(10);

    // Test frames for independent parallel for loops with static scheduling
    testparallelfor();
    (*delay_ptr)(10);

    // test parallel sections
    testparallelsections();
    (*delay_ptr)(10);

    // test explicit tasks
    testtasks();
    (*delay_ptr)(10);

    // Check for failures
    if (nfails != 0) {
        // FIX: snprintf (was sprintf) bounds the write to buf
        snprintf(buf, sizeof buf, "\n  FAILURE:\n\t%d ERROR%s detected\n\n",
                 nfails,
                 nfails == 1 ? "" : "s");
        ts_write(buf);
        printf("\n  FAILURE:\n\t%d ERROR%s detected\n\n",
               nfails,
               nfails == 1 ? "" : "s");
        exit(1);
    } else {
        ts_write("\n  No failures\n\n");
        printf("No failures\n");
        exit(0);
    }
    // CHECK: No failures
}
// ------------------------------------------------------------------------
// Test "omp parallel" with "omp for" loops with various schedules
//
// A single parallel region runs four worksharing loops (default, static,
// dynamic, guided).  validate() is called from every iteration so the OMPT
// frame information can be checked for each scheduling mode; the "master"
// pragmas confine the logging and inter-loop delays to one thread.
// ------------------------------------------------------------------------
void testparallel()
{
int i;
ts_write("\n  starting testparallel\n\n");
#pragma omp parallel private(i)
{
(*validate_ptr)("parallel start");
#pragma omp master
ts_write("\n  starting for\n\n");
#pragma omp for
for(i = 0; i < N; i++) (*validate_ptr)("for");
#pragma omp master
(*delay_ptr)(10);
#pragma omp master
ts_write("\n  starting for static\n\n");
#pragma omp for schedule(static)
for(i = 0; i < N; i++) (*validate_ptr)("for schedule(static)");
#pragma omp master
(*delay_ptr)(10);
#pragma omp master
ts_write("\n  starting for dynamic\n\n");
#pragma omp for schedule(dynamic)
for(i = 0; i < N; i++) (*validate_ptr)("for schedule(dynamic)");
#pragma omp master
(*delay_ptr)(10);
#pragma omp master
ts_write("\n  starting for guided\n\n");
#pragma omp for schedule(guided)
for(i = 0; i < N; i++) (*validate_ptr)("for schedule(guided)");
#pragma omp master
(*delay_ptr)(10);
(*validate_ptr)("parallel end");
}
}
// ------------------------------------------------------------------------
// Test "omp parallel for" loops with various schedules
// ------------------------------------------------------------------------
void
testparallelfor()
{
teststatic();
(*delay_ptr)(10);
testdynamic();
(*delay_ptr)(10);
testguided();
(*delay_ptr)(10);
testsections();
(*delay_ptr)(10);
}
// ------------------------------------------------------------------------
// Test "omp parallel for" loops with various schedules
// ------------------------------------------------------------------------
void teststatic()
{
    int idx;

    ts_write("\n  starting teststatic\n\n");
    /* Statically-scheduled combined parallel-for; validate every iteration. */
    #pragma omp parallel for schedule(static) private(idx)
    for (idx = 0; idx < N; idx++)
        (*validate_ptr)("parallel for static");
}
void testdynamic()
{
    int idx;

    ts_write("\n  starting testdynamic\n\n");
    /* Dynamically-scheduled combined parallel-for; validate every iteration. */
    #pragma omp parallel for schedule(dynamic) private(idx)
    for (idx = 0; idx < N; idx++)
        (*validate_ptr)("parallel for dynamic");
}
void testguided()
{
    int idx;

    ts_write("\n  starting testguided\n\n");
    /* Guided-schedule combined parallel-for; validate every iteration. */
    #pragma omp parallel for schedule(guided) private(idx)
    for (idx = 0; idx < N; idx++)
        (*validate_ptr)("parallel for guided");
}
// ------------------------------------------------------------------------
// Test "omp sections"
//
// Three sections, each validated once.  When RUN_SKEW is defined, each
// section also burns a different amount of CPU so they finish at
// different times.
// ------------------------------------------------------------------------
void testsections()
{
ts_write("\n  starting testsections\n\n");
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
{
(*validate_ptr)("omp section 1");
#ifdef RUN_SKEW
(*skew_delay_ptr)(1);
#endif
}
#pragma omp section
{
(*validate_ptr)("omp section 2");
#ifdef RUN_SKEW
(*skew_delay_ptr)(2);
#endif
}
#pragma omp section
{
(*validate_ptr)("omp section 3");
#ifdef RUN_SKEW
(*skew_delay_ptr)(3);
#endif
}
}
}
}
// Test the combined "omp parallel sections" construct: one validation
// call per section, with an explicit team size of NUMTHREADS.
void testparallelsections()
{
ts_write("\n  starting testparallelsections\n\n");
#pragma omp parallel sections num_threads(NUMTHREADS)
{
#pragma omp section
(*validate_ptr)("omp parallel section 1");
#pragma omp section
(*validate_ptr)("omp parallel section 2");
#pragma omp section
(*validate_ptr)("omp parallel section 3");
}
}
// Test explicit tasks: a single thread creates nine tasks, each of which
// validates the OMPT task/frame information when it runs.
void testtasks()
{
ts_write("\n  starting testtasks\n\n");
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task
(*validate_ptr)("omp task 1");
#pragma omp task
(*validate_ptr)("omp task 2");
#pragma omp task
(*validate_ptr)("omp task 3");
#pragma omp task
(*validate_ptr)("omp task 4");
#pragma omp task
(*validate_ptr)("omp task 5");
#pragma omp task
(*validate_ptr)("omp task 6");
#pragma omp task
(*validate_ptr)("omp task 7");
#pragma omp task
(*validate_ptr)("omp task 8");
#pragma omp task
(*validate_ptr)("omp task 9");
}
}
}
// reductiontest -- check for appropriate callbacks
//
void
reductiontest()
{
    int total, k;

    ts_write("\n  starting reductiontest\n\n");
    total = 0;
    /* Sum reduction across the team; validate frames from inside the loop. */
    #pragma omp parallel for reduction(+:total)
    for (k = 0; k < N; k++) {
        total += k;
        (*validate_ptr)("reductiontest");
    }
}
// -----------------------------------------------------------
// lockcbtest -- make various omp lock calls and verify that
// the code pointers are plausible
//
// NOTE: labels label1/label2 anchor the expected return addresses for the
// lock callbacks -- do not move or reorder the lock calls around them.
// -----------------------------------------------------------
void
lockcbtest()
{
omp_lock_t lock1, lock2;
omp_nest_lock_t lock3;
ts_write("\n  starting lockcbtest\n\n");
// initialize the locks
omp_init_lock(&lock1);
omp_init_lock(&lock2);
omp_init_nest_lock(&lock3);
#pragma omp parallel
{
(*validate_ptr)("lockcb start");
// only one thread exercises the lock operations
#pragma omp master
{
omp_set_lock(&lock1); // code pointer should be approximately label1
label1: omp_unset_lock(&lock1);
omp_set_lock(&lock2); // code pointer should be approximately label2
label2: omp_unset_lock(&lock2);
// now try a nested lock
omp_set_nest_lock(&lock3);
omp_set_nest_lock(&lock3);
omp_set_nest_lock(&lock3);
omp_unset_nest_lock(&lock3);
omp_unset_nest_lock(&lock3);
omp_unset_nest_lock(&lock3);
}
(*validate_ptr)("lockcb end");
}
omp_destroy_lock(&lock1);
omp_destroy_lock(&lock2);
omp_destroy_nest_lock(&lock3);
}
// ------------------------------------------------------------------------
// skew_delay -- burn CPU time to delay threads
// ------------------------------------------------------------------------
void
skew_delay(int count)
{
    volatile float burn;    /* volatile: keeps the busy loop from being optimized away */
    int outer, inner, rounds;

    rounds = 7 * count;
    for (outer = 0; outer < rounds; outer++) {
        burn = 0;
        for (inner = 0; inner < NSKEW; inner++)
            burn = burn + 1.0;
    }
}
// ------------------------------------------------------------------------
// delay -- burn CPU time in main program to space out operations
// ------------------------------------------------------------------------
void
delay(int count)
{
    volatile float burn;    /* volatile: keeps the busy loop from being optimized away */
    int outer, inner, rounds;

    rounds = 7 * count;
    for (outer = 0; outer < rounds; outer++) {
        burn = 0;
        for (inner = 0; inner < NSKEW; inner++)
            burn = burn + 1.0;
    }
}
// omptcb.c -- code for the interactions with the OpenMP library to verify
// the behavior of various callbacks
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <omp-tools.h>
#include "chkompt.h"
ompt_function_lookup_t my_lookup;
ompt_set_callback_t ompt_set_callback_fn; // Address of routine to set callback
ompt_get_task_info_t ompt_get_task_info_fn; // Address of routine to get task information
void (*validate_ptr)(const char *) = validate;
void (*ck_ra_)(const char *, const void *, int) = ck_ra;
int in_implicit_task = 0;
hrtime_t starttime;
// ------------------------------------------------------------------------
// inform the runtime that we will be using OMPT
// This routine is automatically invoked by the OpenMP runtime at
// its initialization, and hands back the tool's two entry points:
//   ompt_initialize -- invoked at the first entry to the runtime
//   ompt_finalize   -- invoked when the runtime shuts down
// ------------------------------------------------------------------------
ompt_start_tool_result_t *
ompt_start_tool
(
    unsigned int omp_version,
    const char *runtime_version
)
{
    // fprintf(stderr, "ompt_start_tool invoked\n");
    static ompt_start_tool_result_t tool_entry_points = {
        ompt_initialize,
        ompt_finalize,
        ompt_data_none
    };
    return &tool_entry_points;
}
// ------------------------------------------------------------------------
// initialize upcall for OMPT
//
// Saves the runtime's lookup function, records the start time, and (unless
// NO_CALLBACKS is defined) registers all of the tool's callbacks.
// Returns non-zero so the runtime keeps the tool active.
// ------------------------------------------------------------------------
int
ompt_initialize
(
ompt_function_lookup_t lookup,
int initial_device_num,
ompt_data_t *tool_data
)
{
// fprintf(stderr, "ompt_initialize invoked\n");
my_lookup = lookup;
starttime = gethrtime();
#ifndef NO_CALLBACKS
// look up the runtime entry point used to register callbacks
ompt_set_callback_fn = (ompt_set_callback_t) lookup("ompt_set_callback");
// register callbacks to be notified about various events
register_callbacks();
#endif
return 1;
}
// ------------------------------------------------------------------------
// finalize upcall for OMPT -- nothing to do
// ------------------------------------------------------------------------
void
ompt_finalize ( ompt_data_t *tool_data)
{
// Intentionally empty: the tool keeps no state that needs teardown here.
}
// ------------------------------------------------------------------------
// Register the various callbacks that will be tested
// ------------------------------------------------------------------------
// Human-readable names for the OMPT callback numbers, indexed by the
// ompt_callbacks_t value; used by register_callbacks() for diagnostics.
// The list is NULL-terminated.
char *cb_names[] = {
"illegal callback number", //=0
"ompt_callback_thread_begin", //=1,
"ompt_callback_thread_end", //=2,
"ompt_callback_parallel_begin", //=3,
"ompt_callback_parallel_end", //=4,
"ompt_callback_task_create", //=5,
"ompt_callback_task_schedule", //=6,
"ompt_callback_implicit_task", //=7,
"ompt_callback_target", //=8,
"ompt_callback_target_data_op", //=9,
"ompt_callback_target_submit", //=10,
"ompt_callback_control_tool", //=11,
"ompt_callback_device_initialize", //=12,
"ompt_callback_device_finalize", //=13,
"ompt_callback_device_load", //=14,
"ompt_callback_device_unload", //=15,
"ompt_callback_sync_region_wait", //=16,
"ompt_callback_mutex_released", //=17,
"ompt_callback_dependences", //=18,
"ompt_callback_task_dependence", //=19,
"ompt_callback_work", //=20,
"ompt_callback_master", //=21,
"ompt_callback_target_map", //=22,
"ompt_callback_sync_region", //=23,
"ompt_callback_lock_init", //=24,
"ompt_callback_lock_destroy", //=25,
"ompt_callback_mutex_acquire", //=26,
"ompt_callback_mutex_acquired", //=27,
"ompt_callback_nest_lock", //=28,
"ompt_callback_flush", //=29,
"ompt_callback_cancel", //=30,
"ompt_callback_reduction", //=31,
"ompt_callback_dispatch", //=32
NULL
};
// ------------------------------------------------------------------------
// register_callbacks -- register every OMPT callback defined in this file
// with the runtime via ompt_set_callback_fn.  Callbacks the implementation
// reports as never / impossible / only-sometimes triggered are noted on
// stderr (using cb_names[] for the printable name); the count of fully
// accepted callbacks is printed at the end.
// ------------------------------------------------------------------------
void
register_callbacks()
{
int ncallbacks = 0;
ompt_set_result_t ret;
// Define a macro to set a callback
// NOTE(review): the macro expands to an if/else chain (not do{}while(0)),
// so it is only safe in plain statement position, as used below.
#define SetCallback(type,name) \
ret = ompt_set_callback_fn ( type, (ompt_callback_t) name); \
if ( (ret == ompt_set_error) || (ret == ompt_set_never) ) { \
fprintf(stderr, " Note: %s (%2d) is never triggered in this implementation of OMPT (%d)\n", \
cb_names[type], (int)type, (int)ret ); \
} else if (ret == ompt_set_impossible) { \
fprintf(stderr, " Note: %s (%2d) is impossible in this implementation of OMPT (%d)\n", \
cb_names[type], (int)type, (int)ret ); \
} else if ( (ret == ompt_set_sometimes) || (ret == ompt_set_sometimes_paired) ) { \
fprintf(stderr, " Note: %s (%2d) may or may not be triggered in this implementation of OMPT (%d)\n", \
cb_names[type], (int)type, (int)ret ); \
} else { \
ncallbacks ++; \
}
// Callback for thread begin
SetCallback(ompt_callback_thread_begin,ompt_thread_begin);
// Callback for thread end
SetCallback(ompt_callback_thread_end, ompt_thread_end);
// Callback for parallel region begin
SetCallback(ompt_callback_parallel_begin, ompt_parallel_begin);
// Callback for parallel region end
SetCallback(ompt_callback_parallel_end, ompt_parallel_end);
// Callback for task creation
SetCallback(ompt_callback_task_create, ompt_task_create);
// Callback for task schedule
SetCallback(ompt_callback_task_schedule, ompt_task_schedule);
// Callback for implicit task creation
SetCallback(ompt_callback_implicit_task, ompt_implicit_task);
// Callback for target
SetCallback(ompt_callback_target, ompt_targetcb);
// Callback for target_data_op
SetCallback(ompt_callback_target_data_op, ompt_target_data_op);
// Callback for target submit
SetCallback(ompt_callback_target_submit, ompt_target_submit);
// Callback for control_tool
SetCallback(ompt_callback_control_tool, ompt_control_tool);
// Callback for device_initialize
SetCallback(ompt_callback_device_initialize, ompt_device_initialize);
// Callback for device_finalize
SetCallback(ompt_callback_device_finalize, ompt_device_finalize);
// Callback for device_load
SetCallback(ompt_callback_device_load, ompt_device_load);
// Callback for device_unload
SetCallback(ompt_callback_device_unload, ompt_device_unload);
// Callback for synchronization region wait
SetCallback(ompt_callback_sync_region_wait, ompt_sync_region_wait);
// Callback for mutex released
SetCallback(ompt_callback_mutex_released, ompt_mutex_released);
// Callback for dependences
SetCallback(ompt_callback_dependences, ompt_dependences);
// Callback for task_dependence
SetCallback(ompt_callback_task_dependence, ompt_task_dependence);
// Callback for work entry
SetCallback(ompt_callback_work, ompt_work);
// Callback for master region entry
SetCallback(ompt_callback_master, ompt_master);
// Callback for target map
SetCallback(ompt_callback_target_map, ompt_target_map);
// Callback for synchronization region
SetCallback(ompt_callback_sync_region, ompt_sync_region);
// Callback for lock init
SetCallback(ompt_callback_lock_init, ompt_lock_init);
// Callback for lock_destroy
SetCallback(ompt_callback_lock_destroy, ompt_lock_destroy);
// Callback for mutex acquire
SetCallback(ompt_callback_mutex_acquire, ompt_mutex_acquire);
// Callback for mutex acquired
SetCallback(ompt_callback_mutex_acquired, ompt_mutex_acquired);
// Callback for nest_lock
SetCallback(ompt_callback_nest_lock, ompt_nest_lock);
// Callback for flush
SetCallback(ompt_callback_flush, ompt_flush);
// Callback for cancel
SetCallback(ompt_callback_cancel, ompt_cancel);
// Callback for reduction
SetCallback(ompt_callback_reduction, ompt_reduction);
// Callback for dispatch
SetCallback(ompt_callback_dispatch, ompt_dispatch);
fprintf(stderr, " %d other callbacks were set\n\n", ncallbacks);
}
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// The various Callback routines
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// OMPT callback for implicit task creation
// ------------------------------------------------------------------------
// Trace implicit-task begin/end.  On begin, run the frame-validation
// routine through validate_ptr and raise the in_implicit_task flag that
// gates the sync_region(_wait) callbacks below; on end, clear the flag.
void
ompt_implicit_task
(
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
unsigned int actual_parallelism,
unsigned int index,
int flags
)
{
// trace the callback
ck_ra("implicit_task_CB", (const void*)1, (int)index);
if (endpoint == ompt_scope_begin) {
(*validate_ptr)("implicit task begin");
in_implicit_task = 1;
} else if (endpoint == ompt_scope_end) {
// (*validate_ptr)("implicit task end"); // can't validate
in_implicit_task = 0;
} else {
abort(); // no others are defined
}
}
// ------------------------------------------------------------------------
// OMPT callback for begin
// ------------------------------------------------------------------------
// Trace thread creation; thread_type is logged as the param.  This
// callback has no codeptr_ra parameter, so the non-NULL sentinel
// (const void*)1 is passed to ck_ra (which only flags NULL pointers).
void
ompt_thread_begin
(
ompt_thread_t thread_type,
ompt_data_t *thread_data
)
{
ck_ra("thread_begin_CB", (const void*)1, (int)thread_type);
}
// ------------------------------------------------------------------------
// OMPT callback for thread end
// ------------------------------------------------------------------------
// Trace thread termination.  No codeptr_ra in this signature; pass the
// non-NULL sentinel to ck_ra.
void
ompt_thread_end
(
ompt_data_t *thread_data
)
{
ck_ra("thread_end_CB", (const void*)1, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for parallel region entry
// ------------------------------------------------------------------------
// Trace parallel-region entry; ck_ra verifies that the runtime supplied
// a non-NULL codeptr_ra for the region.
void
ompt_parallel_begin
(
ompt_data_t *encountering_task_data,
const ompt_frame_t *encountering_task_frame,
ompt_data_t *parallel_data,
unsigned int requested_parallelism,
int flags,
const void *codeptr_ra
)
{
ck_ra("parallel_begin_CB", codeptr_ra, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for parallel region end
// ------------------------------------------------------------------------
// Trace parallel-region exit; ck_ra verifies codeptr_ra is non-NULL.
void
ompt_parallel_end
(
ompt_data_t *parallel_data,
ompt_data_t *encountering_task_data,
int flags,
const void *codeptr_ra
)
{
ck_ra("parallel_end_CB", codeptr_ra, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for task create
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Trace task creation.  The initial task carries no meaningful
// codeptr_ra, so substitute the non-NULL sentinel for it; every other
// task's codeptr_ra is checked for NULL by ck_ra.
// ------------------------------------------------------------------------
void
ompt_task_create
(
ompt_data_t *encountering_task_data,
const ompt_frame_t *encountering_task_frame,
ompt_data_t *new_task_data,
int flags,
int has_dependences,
const void *codeptr_ra
)
{
    ck_ra("task_create_CB",
          (flags & ompt_task_initial) ? (const void *) 1 : codeptr_ra,
          0);
}
// ------------------------------------------------------------------------
// OMPT callback for task schedule
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Trace a task-scheduling point.  No codeptr_ra in this signature, so
// the non-NULL sentinel is passed to ck_ra.
// Fix: this callback previously logged under the copy-pasted label
// "task_create_CB"; it now reports as "task_schedule_CB" so failures are
// attributed to the right callback.
// ------------------------------------------------------------------------
void
ompt_task_schedule
(
ompt_data_t *prior_task_data,
ompt_task_status_t prior_task_status,
ompt_data_t *new_task_data
)
{
    ck_ra("task_schedule_CB", (const void *)1, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for ompt_target
// ------------------------------------------------------------------------
// Trace a target construct begin/end; device_num is logged as the param.
void
ompt_targetcb(
ompt_target_t kind,
ompt_scope_endpoint_t endpoint,
int device_num,
ompt_id_t task_data,
ompt_id_t target_id,
const void *codeptr_ra
)
{
ck_ra("target_CB", codeptr_ra, device_num);
}
// ------------------------------------------------------------------------
// OMPT callback for ompt_target_data_op
// ------------------------------------------------------------------------
// Trace a target data operation (alloc/transfer/delete); the source
// device number is logged as the param.
void
ompt_target_data_op(
ompt_id_t target_id,
ompt_id_t host_op_id,
ompt_target_data_op_t optype,
void *src_addr,
int src_device_num,
void *dest_addr,
int dest_device_num,
size_t bytes,
const void *codeptr_ra
)
{
ck_ra("target data_op_CB", codeptr_ra, src_device_num);
}
// ------------------------------------------------------------------------
// OMPT callback for ompt_target_submit
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Trace a target-kernel submission.  No codeptr_ra in this signature, so
// the non-NULL sentinel is passed to ck_ra.
// Fix: target_id is a 64-bit ompt_id_t being passed to ck_ra's int
// parameter; make the narrowing explicit with a cast, matching the
// (int) casts used by the other callbacks in this file.
// ------------------------------------------------------------------------
void
ompt_target_submit(
ompt_id_t target_id,
ompt_id_t host_op_id,
unsigned int requested_num_teams
)
{
    ck_ra("target_submit_CB", (const void *)1, (int) target_id);
}
// ------------------------------------------------------------------------
// OMPT callback for work
// ------------------------------------------------------------------------
// Trace a worksharing construct begin/end; the worksharing type is
// logged as the param.
void
ompt_work (
ompt_work_t wstype,
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
uint64_t count,
const void *codeptr_ra
)
{
ck_ra("work_CB", codeptr_ra, (int)wstype);
}
// ------------------------------------------------------------------------
// OMPT callback for master
// ------------------------------------------------------------------------
// Trace a master region begin/end; the endpoint is logged as the param.
void
ompt_master (
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
const void *codeptr_ra
)
{
ck_ra("master_CB", codeptr_ra, (int)endpoint);
}
// ------------------------------------------------------------------------
// OMPT callback for target_map
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Trace a target mapping event; the map id is logged as the param.  The
// address/size arrays are not inspected here.
// Fix: corrected parameter-name typo host_adder -> host_addr (names in a
// C definition are local, so callers are unaffected).
// ------------------------------------------------------------------------
void
ompt_target_map (
ompt_id_t id,
unsigned int nitems,
void **host_addr,
void **device_addr,
size_t *bytes,
unsigned int *mapping_flags,
const void *codeptr_ra
)
{
    ck_ra("target_map_CB", codeptr_ra, (int) id);
}
// ------------------------------------------------------------------------
// OMPT callback for sync_region_wait
// ------------------------------------------------------------------------
// Trace waiting inside a synchronization region, but only while inside
// an implicit task (flag set/cleared by ompt_implicit_task above);
// the sync-region kind is logged as the param.
void
ompt_sync_region_wait (
ompt_sync_region_t kind,
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
const void *codeptr_ra
)
{
if (in_implicit_task) {
ck_ra("sync_region_wait_CB", codeptr_ra, (int) kind);
}
}
// ------------------------------------------------------------------------
// OMPT callback for sync_region
// ------------------------------------------------------------------------
// Trace a synchronization region begin/end, but only while inside an
// implicit task (see ompt_implicit_task); kind is logged as the param.
void
ompt_sync_region (
ompt_sync_region_t kind,
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
const void *codeptr_ra
)
{
if (in_implicit_task) {
ck_ra("sync_region_CB", codeptr_ra, (int) kind);
}
}
// ------------------------------------------------------------------------
// OMPT callback for lock_init
// ------------------------------------------------------------------------
// Trace lock initialization; the mutex kind is logged as the param.
void
ompt_lock_init (
ompt_mutex_t kind,
unsigned int hint,
unsigned int impl,
ompt_wait_id_t wait_id,
const void *codeptr_ra
)
{
ck_ra("lock_init_CB", codeptr_ra, (int) kind);
}
// ------------------------------------------------------------------------
// OMPT callback for lock_destroy
// ------------------------------------------------------------------------
// Trace lock destruction; the mutex kind is logged as the param.
void
ompt_lock_destroy (
ompt_mutex_t kind,
ompt_wait_id_t wait_id,
const void *codeptr_ra
)
{
ck_ra("lock_destroy_CB", codeptr_ra, (int) kind);
}
// ------------------------------------------------------------------------
// OMPT callback for mutex_acquire
// ------------------------------------------------------------------------
// Trace the attempt to acquire a mutex; kind is logged as the param.
void
ompt_mutex_acquire (
ompt_mutex_t kind,
unsigned int hint,
unsigned int impl,
ompt_wait_id_t wait_id,
const void *codeptr_ra
)
{
ck_ra("mutex_acquire_CB", codeptr_ra, (int) kind);
}
// ------------------------------------------------------------------------
// OMPT callback for mutex acquired
// ------------------------------------------------------------------------
// Trace successful acquisition of a mutex; kind is logged as the param.
void
ompt_mutex_acquired (
ompt_mutex_t kind,
ompt_wait_id_t wait_id,
const void *codeptr_ra)
{
ck_ra("mutex_acquired_CB", codeptr_ra, (int) kind);
}
// ------------------------------------------------------------------------
// OMPT callback for mutex released
// ------------------------------------------------------------------------
// Trace release of a mutex; kind is logged as the param.
void
ompt_mutex_released(
ompt_mutex_t kind,
ompt_wait_id_t wait_id,
const void *codeptr_ra)
{
ck_ra("mutex_released_CB", codeptr_ra, (int) kind);
}
// ------------------------------------------------------------------------
// OMPT callback for dependences
// ------------------------------------------------------------------------
// Trace reporting of task dependences; the dependence count is logged.
// No codeptr_ra in this signature; pass the non-NULL sentinel.
void
ompt_dependences (
ompt_data_t *task_data,
const ompt_dependence_t *deps,
int ndeps
)
{
ck_ra("dependences_CB", (const void *)1, (int) ndeps);
}
// ------------------------------------------------------------------------
// OMPT callback for task_dependence
// ------------------------------------------------------------------------
// Trace an unsatisfied task dependence.  No codeptr_ra in this
// signature; pass the non-NULL sentinel.
void
ompt_task_dependence (
ompt_data_t *src_task_data,
ompt_data_t *sink_task_data
)
{
ck_ra("task_dependence_CB", (const void *)1, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for nest_lock
// ------------------------------------------------------------------------
// Trace a nested-lock event; the endpoint is logged as the param.
void
ompt_nest_lock (
ompt_scope_endpoint_t endpoint,
ompt_wait_id_t wait_id,
const void *codeptr_ra
)
{
ck_ra("nest_lock_CB", codeptr_ra, (int) endpoint);
}
// ------------------------------------------------------------------------
// OMPT callback for flush
// ------------------------------------------------------------------------
// Trace a flush construct.
void
ompt_flush (
ompt_data_t *thread_data,
const void *codeptr_ra
)
{
ck_ra("flush_CB", codeptr_ra, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for cancel
// ------------------------------------------------------------------------
// Trace a cancellation event.
void
ompt_cancel (
ompt_data_t *task_data,
int flags,
const void *codeptr_ra
)
{
ck_ra("cancel_CB", codeptr_ra, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for control_tool
// ------------------------------------------------------------------------
// Trace an omp_control_tool() call from the application.
void
ompt_control_tool (
uint64_t command,
uint64_t modifier,
void *arg,
const void *codeptr_ra
)
{
ck_ra("control_tool_CB", codeptr_ra, 0);
}
// ------------------------------------------------------------------------
// OMPT callback for device_initialize
// ------------------------------------------------------------------------
// Trace device initialization; device_num is logged as the param.
// No codeptr_ra in this signature; pass the non-NULL sentinel.
void
ompt_device_initialize (
int device_num,
const char *type,
ompt_device_t *device,
ompt_function_lookup_t lookup,
const char *documentation
)
{
ck_ra("device_initialize_CB", (const void*) 1, device_num);
}
// ------------------------------------------------------------------------
// OMPT callback for device_finalize
// ------------------------------------------------------------------------
// Trace device finalization; device_num is logged as the param.
void
ompt_device_finalize(
int device_num
)
{
ck_ra("device_finalize_CB", (const void*) 1, device_num);
}
// ------------------------------------------------------------------------
// OMPT callback for device_load
// ------------------------------------------------------------------------
// Trace loading of a module onto a device; device_num is logged.
// No codeptr_ra in this signature; pass the non-NULL sentinel.
void
ompt_device_load(
int device_num,
const char *filename,
int64_t offset_in_file,
void *vma_in_file,
size_t bytes,
void *host_addr,
void *device_addr,
uint64_t module_id
)
{
ck_ra("device_load_CB", (const void*) 1, device_num);
}
// ------------------------------------------------------------------------
// OMPT callback for device_unload
// ------------------------------------------------------------------------
// Trace unloading of a module from a device; device_num is logged.
void
ompt_device_unload (
int device_num,
uint64_t module_id
)
{
ck_ra("device_unload_CB", (const void*) 1, device_num);
}
// ------------------------------------------------------------------------
// OMPT callback for reduction   (header previously mislabeled "dispatch")
// ------------------------------------------------------------------------
// Trace a reduction begin/end; the sync-region kind is logged as the param.
void
ompt_reduction (
ompt_sync_region_t kind,
ompt_scope_endpoint_t endpoint,
ompt_id_t parallel_id,
ompt_id_t task_id,
const void *codeptr_ra
)
{
ck_ra("reduction_CB", codeptr_ra, (int)kind);
}
// ------------------------------------------------------------------------
// OMPT callback for dispatch
// ------------------------------------------------------------------------
// Trace dispatch of a section/iteration chunk; the dispatch kind is
// logged.  No codeptr_ra in this signature; pass the non-NULL sentinel.
void
ompt_dispatch (
ompt_data_t *parallel_data,
ompt_data_t *task_data,
ompt_dispatch_t kind,
ompt_data_t instance
)
{
ck_ra("dispatch_CB", (const void*)1, (int)kind);
}
// ------------------------------------------------------------------------
// ck_ra -- invoked from various callbacks
// check that the return address pointer is non-NULL
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// ck_ra -- verify that a callback's return-address pointer is non-NULL.
//   type  -- short callback name used in the report
//   ra    -- codeptr_ra from the callback ((const void *)1 is the
//            sentinel used when the callback has no codeptr_ra argument)
//   param -- callback-specific integer included in the message
// On failure, logs via ts_write and atomically increments the global
// failure counter nfails.  Under TRACE_ALL, successes are logged too.
// Fix: sprintf -> snprintf so a long callback name can never overrun buf.
// ------------------------------------------------------------------------
void
ck_ra(const char * type, const void *ra, int param)
{
    int threadnum;
    char buf[256];

    threadnum = omp_get_thread_num();
    if (ra == NULL) {
        snprintf( buf, sizeof buf,
            "%22s -- ERROR -- thread %3d, param = %d, codeptr_ra == NULL\n",
            type, threadnum, param );
        ts_write (buf);
#pragma omp atomic update
        nfails ++;
    } else {
#ifdef TRACE_ALL
        snprintf( buf, sizeof buf,
            "%22s OK ck_ra -- thread %3d, param = %d codeptr_ra = %p\n",
            type, threadnum, param, ra );
        ts_write (buf);
#endif
    }
}
// ------------------------------------------------------------------------
// validate
// delay a bit
// ask for the caller's frame
// check that its exit_frame pointer is non-NULL, and flag is non-zero
// check that its enter_frame pointer is NULL, and flag is zero
// ask for the caller's ancestors' frame
// check that its exit_frame pointer is non-NULL, and flag is non-zero
// delay a varying amount, depending on thread number to desynchonize the threads
// ------------------------------------------------------------------------
// validate -- check the OMPT frame info for the calling task.
// Level-0 checks: task_frame non-NULL, exit_frame set (ptr non-NULL,
// flags non-zero), enter_frame clear (ptr NULL, flags zero).  Each
// failure is logged via ts_write and counted in nfails.
// NOTE(review): the ancestor (level-1) query below is nested INSIDE the
// "enter_frame_flags != 0" error branch, so it only runs when that check
// FAILS.  The header comment above describes the ancestor check as a
// separate step; this looks like a misplaced brace -- confirm intent
// before restructuring.
void
validate(const char *type)
{
int thread_num;
ompt_frame_t *task_frame;
ompt_frame_t *parent_task_frame;
char buf[256];
#ifdef RUN_SKEW
(*skew_delay_ptr)(1);
#endif
ompt_get_task_info_fn
(
0, // ancestor_level
NULL, // flags
NULL, // task_data
&task_frame,
NULL, // parallel_data
&thread_num
);
// Check for failure
if (task_frame == NULL) {
sprintf( buf,
"%22s -- ERROR -- thread %3d task_frame = NULL\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
} else if (task_frame->exit_frame.ptr == NULL) {
sprintf( buf,
"%22s -- ERROR -- thread %3d exit_frame.ptr = NULL\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
} else if (task_frame->exit_frame_flags == 0) {
sprintf( buf,
"%22s -- ERROR -- thread %3d exit_frame.flags = 0\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
} else if (task_frame->enter_frame.ptr != NULL) {
sprintf( buf,
"%22s -- ERROR -- thread %3d enter_frame.ptr != NULL\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
} else if (task_frame->enter_frame_flags != 0) {
sprintf( buf,
"%22s -- ERROR -- thread %3d enter_frame.flags = 0x%02x != 0\n",
type, thread_num, task_frame->enter_frame_flags);
ts_write (buf);
#pragma omp atomic update
nfails ++;
// Now check the enter_frame for the ancestor
ompt_get_task_info_fn
(
1, // ancestor_level
NULL, // flags
NULL, // task_data
&parent_task_frame,
NULL, // parallel_data
&thread_num
);
if (parent_task_frame == NULL) {
sprintf( buf,
"%22s -- ERROR -- thread %3d parent_task_frame = NULL\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
} else if (parent_task_frame->enter_frame.ptr == NULL) {
sprintf( buf,
"%22s -- ERROR -- thread %3d parent enter_frame.ptr = NULL\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
} else if (parent_task_frame->enter_frame_flags == 0) {
sprintf( buf,
"%22s -- ERROR -- thread %3d parent enter_frame_flags = 0\n",
type, thread_num);
ts_write (buf);
#pragma omp atomic update
nfails ++;
}
} else {
#ifdef TRACE_ALL
sprintf( buf,
"%22s OK return -- thread %3d exit_frame.ptr = %p flags = 0x%02x\n",
type, thread_num, task_frame->exit_frame.ptr, task_frame->exit_frame_flags);
ts_write (buf);
#endif
}
#ifdef RUN_SKEW
// desynchronize the threads: delay proportional to thread number
(*skew_delay_ptr)(thread_num);
#endif
}
// ------------------------------------------------------------------------
// ts_write -- write error (or log) to stderr, with a timestamp
// if NO_TIMESTAMPS is defined, don't write the timestamp
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// ts_write -- write a message to stderr, prefixed (unless NO_TIMESTAMPS
// is defined) with seconds.nanoseconds elapsed since starttime.
// A single fwrite is used so the whole line is emitted in one call.
// Fix: sprintf -> snprintf so an oversized message cannot overrun buf
// (the prefix+message is truncated instead).
// ------------------------------------------------------------------------
void
ts_write (char *message)
{
    hrtime_t delta;
    char buf[512];
    int sec;
    int nsec;
#ifdef NO_TIMESTAMPS
    fwrite (message, strlen(message), 1, stderr );
#else
    delta = gethrtime() - starttime;
    sec = delta / 1000000000;
    nsec = delta % 1000000000;
    snprintf(buf, sizeof buf, "%4d.%09d: %s",
        sec, nsec, message);
    fwrite (buf, strlen(buf), 1, stderr );
#endif
}
// Nanosecond-resolution timestamp built on omp_get_wtime (seconds -> ns).
hrtime_t
gethrtime()
{
return ( (hrtime_t) (omp_get_wtime() * 1.0E09) );
}
|
GB_unaryop__identity_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_int64
// op(A') function: GB_tran__identity_int32_int64
// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) Ax [p] for all p, parallelized statically over
// nthreads.  GB_CAST_OP expands to the load/cast/store defined above.
// NOTE: this file is auto-generated (see header); code left unmodified.
GrB_Info GB_unop__identity_int32_int64
(
int32_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int32_t) A': transpose with typecast; the actual loop body lives
// in the textually-included template GB_unaryop_transpose.c, driven by
// the GB_* macros defined at the top of this file.
// NOTE: this file is auto-generated (see header); code left unmodified.
GrB_Info GB_tran__identity_int32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tworkshare.c | #include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <omp.h> /* OpenMP */
// Shared counter updated by every worksharing loop below.
long result=0;
// Exercise several worksharing/synchronization combinations; each of the
// four 10-iteration loops adds 10 to result (guarded by atomic, a
// reduction, atomic, and critical respectively), then each thread adds 1
// more after the nowait loop.  Behavior (including the deliberate mix of
// constructs) is the point of this test -- code left unmodified.
void foo() {
#pragma omp parallel
{
#pragma omp for
for (long i = 0; i < 10; i++) {
#pragma omp atomic
result++;
}
#pragma omp for schedule(dynamic,1) reduction(+: result)
for (long i = 0; i < 10; i++)
result++;
#pragma omp for schedule(dynamic,1)
for (long i = 0; i < 10; i++)
#pragma omp atomic
result++;
// nowait: threads fall through to the per-thread atomic increment below
#pragma omp for schedule(dynamic,2) nowait
for (long i = 0; i < 10; i++)
#pragma omp critical
result++;
#pragma omp atomic
result++;
#pragma omp barrier
#pragma omp single
printf("Inside foo ... result = %ld\n", result);
}
}
// Driver: run the worksharing test and report the final counter.
int main(int argc, char *argv[]) {
    (void) argc;   /* unused */
    (void) argv;   /* unused */
    foo();
    printf("Back in main ... result = %ld\n", result);
    return 0;      /* explicit; was the implicit C99 return */
}
|
alignment.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/* Original code from the Application Kernel Matrix by Cray */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <libgen.h>
#include "param.h"
#include "alignment.h"
#include "bots.h"
int readseqs(int first_seq, char *filename);
int ktup, window, signif;
int prot_ktup, prot_window, prot_signif;
int gap_pos1, gap_pos2, mat_avscore;
int nseqs, max_aa;
#define MAX_ALN_LENGTH 5000
int *seqlen_array, def_aa_xref[NUMRES+1];
int *bench_output, *seq_output;
double gap_open, gap_extend;
double prot_gap_open, prot_gap_extend;
double pw_go_penalty, pw_ge_penalty;
double prot_pw_go_penalty, prot_pw_ge_penalty;
char **args, **names, **seq_array;
int matrix[NUMRES][NUMRES];
#define MIN(a,b) ((a)<(b)?(a):(b))
#define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k))
#define tegap(k) ((k) <= 0 ? 0 : te + gh * (k))
/***********************************************************************
* :
**********************************************************************/
/***********************************************************************
 * del: record a deletion (gap in sequence 2) of length k in the
 * display list.  A pending deletion (*last_print < 0) is extended in
 * place; otherwise a new negative entry is appended.
 **********************************************************************/
void del(int k, int *print_ptr, int *last_print, int *displ)
{
    if (*last_print < 0) {
        /* merge into the previous gap entry */
        displ[*print_ptr - 1] -= k;
        *last_print = displ[*print_ptr - 1];
    } else {
        displ[*print_ptr] = -k;
        *last_print = -k;
        (*print_ptr)++;
    }
}
/***********************************************************************
* :
**********************************************************************/
/***********************************************************************
 * add: record an insertion (gap in sequence 1) of length v in the
 * display list.  If a deletion is pending (*last_print < 0), the
 * insertion is placed before it and the pending entry re-appended;
 * otherwise a new positive entry is appended.
 **********************************************************************/
void add(int v, int *print_ptr, int *last_print, int *displ)
{
    int idx = *print_ptr;
    if (*last_print < 0) {
        displ[idx - 1] = v;
        displ[idx] = *last_print;
    } else {
        displ[idx] = v;
        *last_print = v;
    }
    *print_ptr = idx + 1;
}
/***********************************************************************
* :
**********************************************************************/
/***********************************************************************
 * calc_score: substitution-matrix score for residue iat of sequence
 * seq1 (at offset v1) versus residue jat of sequence seq2 (offset v2).
 **********************************************************************/
int calc_score(int iat, int jat, int v1, int v2, int seq1, int seq2)
{
    int row = seq_array[seq1][v1 + iat];
    int col = seq_array[seq2][v2 + jat];
    return matrix[row][col];
}
/***********************************************************************
* :
**********************************************************************/
// get_matrix: expand the packed lower-triangular score table matptr into
// the global square matrix[][], remapping residue codes through xref and
// scaling each entry by `scale`.  Also computes mat_avscore (negated
// average off-diagonal score) and fills the gap rows/columns with zero
// scores.  Returns the number of residues handled (+2 for the two gap
// codes), or decremented counts on sparse tables.
int get_matrix(int *matptr, int *xref, int scale)
{
int gg_score = 0;
int gr_score = 0;
int i, j, k, ti, tj, ix;
int av1, av2, av3, min, max, maxres;
// clear the target matrix
for (i = 0; i <= max_aa; i++)
for (j = 0; j <= max_aa; j++) matrix[i][j] = 0;
ix = 0;
maxres = 0;
// walk the packed lower triangle, mirroring into both halves
for (i = 0; i <= max_aa; i++) {
ti = xref[i];
for (j = 0; j <= i; j++) {
tj = xref[j];
if ((ti != -1) && (tj != -1)) {
k = matptr[ix];
if (ti == tj) {
matrix[ti][ti] = k * scale;
maxres++;
} else {
matrix[ti][tj] = k * scale;
matrix[tj][ti] = k * scale;
}
ix++;
}
}
}
maxres--;
// averages over the filled triangle
// NOTE(review): av1/av2 use integer division and av3 divides an int by a
// double then truncates back to int -- presumably intentional rounding
// inherited from the original ClustalW code; confirm before changing.
av1 = av2 = av3 = 0;
for (i = 0; i <= max_aa; i++) {
for (j = 0; j <= i; j++) {
av1 += matrix[i][j];
if (i == j) av2 += matrix[i][j];
else av3 += matrix[i][j];
}
}
av1 /= (maxres*maxres)/2;
av2 /= maxres;
av3 /= ((double)(maxres*maxres-maxres))/2;
mat_avscore = -av3;
min = max = matrix[0][0];
for (i = 0; i <= max_aa; i++)
for (j = 1; j <= i; j++) {
if (matrix[i][j] < min) min = matrix[i][j];
if (matrix[i][j] > max) max = matrix[i][j];
}
// gap codes score gr_score (0) against residues, gg_score (0) together
for (i = 0; i < gap_pos1; i++) {
matrix[i][gap_pos1] = gr_score;
matrix[gap_pos1][i] = gr_score;
matrix[i][gap_pos2] = gr_score;
matrix[gap_pos2][i] = gr_score;
}
matrix[gap_pos1][gap_pos1] = gg_score;
matrix[gap_pos2][gap_pos2] = gg_score;
matrix[gap_pos2][gap_pos1] = gg_score;
matrix[gap_pos1][gap_pos2] = gg_score;
maxres += 2;
return(maxres);
}
/***********************************************************************
* :
**********************************************************************/
// forward_pass: affine-gap local-alignment DP (scores clamped at 0) over
// sequences ia[1..n] and ib[1..m], using the global matrix[][] for
// substitution scores.  g is the gap-open penalty, gh the gap-extend
// penalty.  Returns (via out-params) the maximum cell score and its
// end coordinates *se1/*se2.  HH is the current row of best scores,
// DD the best score ending in a vertical gap.
void forward_pass(char *ia, char *ib, int n, int m, int *se1, int *se2, int *maxscore, int g, int gh)
{
int i, j, f, p, t, hh;
int HH[MAX_ALN_LENGTH];
int DD[MAX_ALN_LENGTH];
*maxscore = 0;
*se1 = *se2 = 0;
for (i = 0; i <= m; i++) {HH[i] = 0; DD[i] = -g;}
for (i = 1; i <= n; i++) {
hh = p = 0;
f = -g;  // f: best score ending in a horizontal gap on this row
for (j = 1; j <= m; j++) {
f -= gh;
t = hh - g - gh;
if (f < t) f = t;
DD[j] -= gh;
t = HH[j] - g - gh;
if (DD[j] < t) DD[j] = t;
hh = p + matrix[(int)ia[i]][(int)ib[j]];
if (hh < f) hh = f;
if (hh < DD[j]) hh = DD[j];
if (hh < 0) hh = 0;  // local alignment: never go negative
p = HH[j];
HH[j] = hh;
if (hh > *maxscore) {*maxscore = hh; *se1 = i; *se2 = j;}
}
}
}
/***********************************************************************
* :
**********************************************************************/
// reverse_pass: run the affine-gap DP backwards from the end point
// (se1,se2) found by forward_pass, to locate the start coordinates
// *sb1/*sb2 of the optimal local alignment.  Stops as soon as the
// forward maximum score is reached again.
void reverse_pass(char *ia, char *ib, int se1, int se2, int *sb1, int *sb2, int maxscore, int g, int gh)
{
int i, j, f, p, t, hh, cost;
int HH[MAX_ALN_LENGTH];
int DD[MAX_ALN_LENGTH];
cost = 0;
*sb1 = *sb2 = 1;
for (i = se2; i > 0; i--){ HH[i] = -1; DD[i] = -1;}
for (i = se1; i > 0; i--) {
hh = f = -1;
if (i == se1) p = 0; else p = -1;
for (j = se2; j > 0; j--) {
f -= gh;
t = hh - g - gh;
if (f < t) f = t;
DD[j] -= gh;
t = HH[j] - g - gh;
if (DD[j] < t) DD[j] = t;
hh = p + matrix[(int)ia[i]][(int)ib[j]];
if (hh < f) hh = f;
if (hh < DD[j]) hh = DD[j];
p = HH[j];
HH[j] = hh;
if (hh > cost) {
cost = hh; *sb1 = i; *sb2 = j;
if (cost >= maxscore) break;  // recovered the forward score: done
}
}
if (cost >= maxscore) break;
}
}
/***********************************************************************
* :
**********************************************************************/
/***********************************************************************
 * diff: Myers-Miller style divide-and-conquer global alignment of
 * seq1[A+1..A+M] vs seq2[B+1..B+N] in linear space.  tb/te are the
 * gap-open penalties at the top/bottom boundaries, g/gh the interior
 * gap-open/extend penalties.  Edit operations are appended to displ[]
 * via add()/del(); returns the alignment score of this sub-problem.
 * Fix: replaced the double literals 0.0 (assigned to the int array HH
 * and passed to the int parameters tb/te in the recursive calls) with
 * plain 0 -- identical values, correct types.
 **********************************************************************/
int diff (int A, int B, int M, int N, int tb, int te, int *print_ptr, int *last_print, int *displ, int seq1, int seq2, int g, int gh)
{
    int i, j, f, e, s, t, hh;
    int midi, midj, midh, type;
    int HH[MAX_ALN_LENGTH];
    int DD[MAX_ALN_LENGTH];
    int RR[MAX_ALN_LENGTH];
    int SS[MAX_ALN_LENGTH];
    /* base case: nothing left in sequence 2 -- emit a deletion */
    if (N <= 0) {if (M > 0) del(M, print_ptr, last_print, displ); return( - (int) tbgap(M)); }
    /* base case: at most one residue left in sequence 1 */
    if (M <= 1) {
        if (M <= 0) {add(N, print_ptr, last_print, displ); return( - (int)tbgap(N));}
        midh = -(tb+gh) - tegap(N);
        hh = -(te+gh) - tbgap(N);
        if (hh > midh) midh = hh;
        midj = 0;
        for (j = 1; j <= N; j++) {
            hh = calc_score(1,j,A,B,seq1,seq2) - tegap(N-j) - tbgap(j-1);
            if (hh > midh) {midh = hh; midj = j;}
        }
        if (midj == 0) {
            del(1, print_ptr, last_print, displ);
            add(N, print_ptr, last_print, displ);
        } else {
            if (midj > 1) add(midj-1, print_ptr, last_print, displ);
            displ[(*print_ptr)++] = *last_print = 0;
            if (midj < N) add(N-midj, print_ptr, last_print, displ);
        }
        return midh;
    }
    /* forward DP over the top half */
    midi = M / 2;
    HH[0] = 0;
    t = -tb;
    for (j = 1; j <= N; j++) {
        HH[j] = t = t - gh;
        DD[j] = t - g;
    }
    t = -tb;
    for (i = 1; i <= midi; i++) {
        s = HH[0];
        HH[0] = hh = t = t - gh;
        f = t - g;
        for (j = 1; j <= N; j++) {
            if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
            if ((hh = HH[j] - g - gh) > (e = DD[j]- gh)) e = hh;
            hh = s + calc_score(i,j,A,B,seq1,seq2);
            if (f > hh) hh = f;
            if (e > hh) hh = e;
            s = HH[j];
            HH[j] = hh;
            DD[j] = e;
        }
    }
    DD[0] = HH[0];
    /* reverse DP over the bottom half */
    RR[N] = 0;
    t = -te;
    for (j = N-1; j >= 0; j--) {RR[j] = t = t - gh; SS[j] = t - g;}
    t = -te;
    for (i = M - 1; i >= midi; i--) {
        s = RR[N];
        RR[N] = hh = t = t-gh;
        f = t - g;
        for (j = N - 1; j >= 0; j--) {
            if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
            if ((hh = RR[j] - g - gh) > (e = SS[j] - gh)) e = hh;
            hh = s + calc_score(i+1,j+1,A,B,seq1,seq2);
            if (f > hh) hh = f;
            if (e > hh) hh = e;
            s = RR[j];
            RR[j] = hh;
            SS[j] = e;
        }
    }
    SS[N] = RR[N];
    /* find the column where the two halves meet best; type 2 means the
       crossing happens inside a gap spanning the middle row */
    midh = HH[0] + RR[0];
    midj = 0;
    type = 1;
    for (j = 0; j <= N; j++) {
        hh = HH[j] + RR[j];
        if (hh >= midh)
            if (hh > midh || (HH[j] != DD[j] && RR[j] == SS[j]))
                {midh = hh; midj = j;}
    }
    for (j = N; j >= 0; j--) {
        hh = DD[j] + SS[j] + g;
        if (hh > midh) {midh = hh;midj = j;type = 2;}
    }
    /* recurse on the two halves */
    if (type == 1) {
        diff(A, B, midi, midj, tb, g, print_ptr, last_print, displ, seq1, seq2, g, gh);
        diff(A+midi, B+midj, M-midi, N-midj, g, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
    } else {
        diff(A, B, midi-1, midj, tb, 0, print_ptr, last_print, displ, seq1, seq2, g, gh);
        del(2, print_ptr, last_print, displ);
        diff(A+midi+1, B+midj, M-midi-1, N-midj, 0, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
    }
    return midh;
}
/***********************************************************************
* :
**********************************************************************/
// tracepath: replay the edit script in displ[] starting at (tsb1,tsb2)
// and count exact matches between non-gap residues.  displ[i]==0 is an
// aligned pair, >0 advances sequence 2 (insertion), <0 advances
// sequence 1 (deletion).  Returns 100*count; the caller divides by the
// shorter ungapped length to get a percent identity.
// NOTE(review): c1 (char) is compared against the int globals
// gap_pos1/gap_pos2 -- presumably the gap codes fit in char; confirm.
double tracepath(int tsb1, int tsb2, int *print_ptr, int *last_print, int *displ, int seq1, int seq2)
{
int i, k;
int i1 = tsb1;
int i2 = tsb2;
int pos = 0;
int count = 0;
for (i = 1; i <= *print_ptr - 1; ++i) {
if (displ[i]==0) {
char c1 = seq_array[seq1][i1];
char c2 = seq_array[seq2][i2];
if ((c1!=gap_pos1) && (c1 != gap_pos2) && (c1 == c2)) count++;
++i1; ++i2; ++pos;
} else if ((k = displ[i]) > 0) {
i2 += k;
pos += k;
} else {
i1 -= k;
pos -= k;
}
}
return (100.0 * (double) count);
}
/*
 * pairalign: OpenMP-parallel pairwise alignment of every ordered pair
 * (si, sj), si < sj, of the nseqs global sequences.  One untied task is
 * spawned per pair; each task writes exactly one distinct cell of
 * bench_output, so no synchronization is needed on the output matrix.
 * The istart/iend/jstart/jend parameters are not used (the full
 * [0, nseqs) range is always processed).
 * Returns 0 on success, -1 if the substitution matrix cannot be loaded.
 */
int pairalign(int istart, int iend, int jstart, int jend)
{
int i, n, m, si, sj;
int len1, len2, maxres;
double gg, mm_score;
int *mat_xref, *matptr;
matptr = gon250mt;
mat_xref = def_aa_xref;
maxres = get_matrix(matptr, mat_xref, 10);
if (maxres == 0) return(-1);
bots_message("Start aligning ");
#pragma omp parallel
{
/* dynamic schedule: per-row work varies with sequence lengths */
#pragma omp for schedule(dynamic) private(i,n,si,sj,len1,m)
for (si = 0; si < nseqs; si++) {
if ((n = seqlen_array[si+1]) != 0){
/* len1 = residues of sequence si that are not gap codes */
for (i = 1, len1 = 0; i <= n; i++) {
char c = seq_array[si+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len1++;
}
for (sj = si + 1; sj < nseqs; sj++)
{
if ((m = seqlen_array[sj+1]) != 0)
{
#pragma omp task untied \
private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \
shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore)
{
int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
/* per-task displacement buffer for diff()/tracepath() */
int displ[2*MAX_ALN_LENGTH+1];
int print_ptr, last_print;
/* len2 = non-gap residues of sequence sj */
for (i = 1, len2 = 0; i <= m; i++) {
char c = seq_array[sj+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len2++;
}
/* gap penalties: extension scaled by 10; opening scaled by the
   matrix average score (flat 20 when the average is <= 0) */
gh = 10 * pw_ge_penalty;
gg = pw_go_penalty + log((double) MIN(n, m));
g = (mat_avscore <= 0) ? 20 * gg : 2 * mat_avscore * gg;
seq1 = si + 1;
seq2 = sj + 1;
/* locate the optimal local alignment span, then trace it */
forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
print_ptr = 1;
last_print = 0;
diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
mm_score = tracepath(sb1, sb2, &print_ptr, &last_print, displ, seq1, seq2);
/* percent identity normalized by the shorter sequence */
if (len1 == 0 || len2 == 0) mm_score = 0.0;
else mm_score /= (double) MIN(len1,len2);
bench_output[si*nseqs+sj] = mm_score;
}
}
}
}
}
}
bots_message(" completed!\n");
return 0;
}
int pairalign_seq(int istart, int iend, int jstart, int jend)
{
int i, n, m, si, sj;
int len1, len2, maxres;
double gg, mm_score;
int *mat_xref, *matptr;
matptr = gon250mt;
mat_xref = def_aa_xref;
maxres = get_matrix(matptr, mat_xref, 10);
if (maxres == 0) return(-1);
for (si = 0; si < nseqs; si++) {
if ((n = seqlen_array[si+1]) != 0){
for (i = 1, len1 = 0; i <= n; i++) {
char c = seq_array[si+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len1++;
}
for (sj = si + 1; sj < nseqs; sj++) {
if ((m = seqlen_array[sj+1]) != 0){
int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
int displ[2*MAX_ALN_LENGTH+1];
int print_ptr, last_print;
for (i = 1, len2 = 0; i <= m; i++) {
char c = seq_array[sj+1][i];
if ((c != gap_pos1) && (c != gap_pos2)) len2++;
}
gh = 10 * pw_ge_penalty;
gg = pw_go_penalty + log((double) MIN(n, m));
g = (mat_avscore <= 0) ? 20 * gg : 2 * mat_avscore * gg;
seq1 = si + 1;
seq2 = sj + 1;
forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
print_ptr = 1;
last_print = 0;
diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
mm_score = tracepath(sb1, sb2, &print_ptr, &last_print, displ, seq1, seq2);
if (len1 == 0 || len2 == 0) mm_score = 0.0;
else mm_score /= (double) MIN(len1,len2);
seq_output[si*nseqs+sj] = mm_score;
}
}
}
}
return 0;
}
/***********************************************************************
 * matrix / program initialization helpers
 **********************************************************************/
/*
 * init_matrix: initialize the gap codes and build the cross-reference
 * table mapping each position of amino_acid_order to its index within
 * amino_acid_codes (-1 when a code has no match).
 */
void init_matrix(void)
{
  int oi, ci;
  char oc, cc;
  gap_pos1 = NUMRES - 2;   /* reserved code for gap type 1 */
  gap_pos2 = NUMRES - 1;   /* reserved code for gap type 2 */
  max_aa = strlen(amino_acid_codes) - 2;
  for (oi = 0; oi < NUMRES; oi++)
    def_aa_xref[oi] = -1;  /* -1 => unmapped */
  for (oi = 0; (oc = amino_acid_order[oi]) != '\0'; oi++) {
    for (ci = 0; (cc = amino_acid_codes[ci]) != '\0'; ci++) {
      if (oc == cc) {
        def_aa_xref[oi] = ci;
        break;
      }
    }
  }
}
/*
 * pairalign_init: read the input sequences and set the global alignment
 * parameters (k-tuple, window, significance, gap penalties).
 *
 * filename: path of the sequence file; aborts via bots_error() when
 *           missing or empty.
 */
void pairalign_init (char *filename)
{
  int i;
  if (!filename || !filename[0]) {
    bots_error(0, "Please specify an input file with the -f option\n");
  }
  init_matrix();
  nseqs = readseqs(1,filename);
  bots_message("Multiple Pairwise Alignment (%d sequences)\n",nseqs);
  for (i = 1; i <= nseqs; i++)
    /* FIX: was "%6.d" — an empty precision means precision 0, which
       prints nothing at all for a zero-length sequence. */
    bots_debug("Sequence %d: %s %6d aa\n", i, names[i], seqlen_array[i]);
  ktup = 1;
  window = 5;
  signif = 5;
  gap_open = 10.0;
  gap_extend = 0.2;
  pw_go_penalty = 10.0;
  pw_ge_penalty = 0.1;
}
/*
 * align_init: allocate and zero the nseqs x nseqs score matrix for the
 * parallel benchmark run.  Uses calloc (instead of the original
 * malloc + explicit zeroing loops) so the matrix starts zeroed and the
 * n*n byte count is overflow-checked by the allocator; the allocation
 * result is now checked (the original dereferenced it unchecked).
 */
void align_init ()
{
  bench_output = (int *) calloc((size_t) nseqs * nseqs, sizeof(int));
  if (bench_output == NULL) {
    bots_error(0, "Out of memory allocating benchmark output matrix\n");
  }
}
/*
 * align: run the parallel pairwise alignment over all sequences.
 * The range arguments cover [0, nseqs); pairalign() itself iterates over
 * nseqs and does not read them.
 */
void align()
{
pairalign(0, nseqs,0, nseqs);
}
/*
 * align_seq_init: allocate and zero both score matrices for the
 * sequential verification run.  FIX: the original malloc'ed bench_output
 * here but only zeroed seq_output, leaving bench_output uninitialized —
 * align_verify() reads every cell of both matrices, so cells never
 * written by pairalign() were indeterminate.  Both matrices are now
 * calloc'ed (zeroed) and the allocations are checked.
 */
void align_seq_init ()
{
  seq_output = (int *) calloc((size_t) nseqs * nseqs, sizeof(int));
  bench_output = (int *) calloc((size_t) nseqs * nseqs, sizeof(int));
  if (seq_output == NULL || bench_output == NULL) {
    bots_error(0, "Out of memory allocating output matrices\n");
  }
}
/*
 * align_seq: run the sequential reference pairwise alignment over all
 * sequences; results land in seq_output for align_verify().
 */
void align_seq()
{
pairalign_seq(0, nseqs,0, nseqs);
}
/*
 * align_end: report every non-zero score produced by the parallel run.
 */
void align_end ()
{
  int row, col;
  for (row = 0; row < nseqs; row++) {
    for (col = 0; col < nseqs; col++) {
      int score = bench_output[row * nseqs + col];
      if (score != 0) {
        bots_debug("Benchmark sequences (%d:%d) Aligned. Score: %d\n",
                   row + 1, col + 1, score);
      }
    }
  }
}
/*
 * align_verify: compare the parallel (bench_output) and sequential
 * (seq_output) score matrices cell by cell.  Every mismatch is reported;
 * returns BOTS_RESULT_SUCCESSFUL only when all cells agree.
 */
int align_verify ()
{
  int row, col;
  int result = BOTS_RESULT_SUCCESSFUL;
  for (row = 0; row < nseqs; row++) {
    for (col = 0; col < nseqs; col++) {
      int par_score = bench_output[row * nseqs + col];
      int seq_score = seq_output[row * nseqs + col];
      if (par_score != seq_score) {
        bots_message("Error: Optimized prot. (%3d:%3d)=%5d Sequential prot. (%3d:%3d)=%5d\n",
                     row + 1, col + 1, par_score,
                     row + 1, col + 1, seq_score);
        result = BOTS_RESULT_UNSUCCESSFUL;
      }
    }
  }
  return result;
}
/* GB_unaryop__abs_int8_bool.c */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_bool
// op(A') function: GB_tran__abs_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = GB_IABS ((int8_t) Ax [p]) for all p, parallelized with a
   static schedule over nthreads threads (independent elements, no races).
   Returns GrB_NO_VALUE when the operator is compiled out via GB_DISABLE. */
GrB_Info GB_unop__abs_int8_bool
(
int8_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
/* expands to: cast Ax[p] to int8_t, take GB_IABS, store in Cx[p] */
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): the transpose kernel body lives in the shared
   template GB_unaryop_transpose.c, specialized here by the GB_* macros
   defined above (phase 2 of 2).  Returns GrB_NO_VALUE when disabled. */
GrB_Info GB_tran__abs_int8_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
/* quick_sect_d.c */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
/* OpenMP Parallel Quicksort - Sections with depth checking
* Checking for depth allows for nested parallelism.
* @author: ANDREW VAILLANCOURT
* 2019
*/
/*
 * partition: Hoare-style partition of data[p..r] (inclusive) around the
 * pivot value data[p].  On return the pivot sits at the returned index q,
 * with data[p..q-1] <= pivot and data[q+1..r] > pivot.
 *
 * Requires p < r.  As in the original, the inner right-to-left scan uses
 * the pivot itself as a sentinel, so no explicit lower bound is needed.
 *
 * FIX: the original wrapped this body in `while (1) { ... return l; }`;
 * the return is reached unconditionally on the first pass, so the outer
 * loop was dead code and has been removed.
 */
int partition (int p, int r, int *data){
  int pivot = data[p];
  int k = p;        /* left cursor: seeks elements > pivot  */
  int l = r + 1;    /* right cursor: seeks elements <= pivot */
  int tmp;

  /* initial approach from both ends (left scan bounded by r) */
  do
    k++;
  while ((data[k] <= pivot) && (k < r));
  do
    l--;
  while (data[l] > pivot);

  /* swap misplaced pairs until the cursors cross */
  while (k < l) {
    tmp = data[k];
    data[k] = data[l];
    data[l] = tmp;
    do
      k++;
    while (data[k] <= pivot);
    do
      l--;
    while (data[l] > pivot);
  }

  /* place the pivot into its final position */
  tmp = data[p];
  data[p] = data[l];
  data[l] = tmp;
  return l;
}
/*
 * seq_quick_sort: sequential quicksort of data[p..r] (inclusive).
 * Recurses on the left partition and iterates on the right (manual
 * tail-call elimination); the sequence of partition() calls is the same
 * as the fully recursive original.
 */
void seq_quick_sort (int p,int r,int *data){
  while (p < r) {
    int pivot_idx = partition(p, r, data);
    seq_quick_sort(p, pivot_idx - 1, data);
    p = pivot_idx + 1;  /* continue with the right half */
  }
}
/*
 * quick_sort: parallel quicksort of data[p..r] using nested OpenMP
 * sections.  Falls back to the sequential sort when the slice is smaller
 * than low_limit or only one thread remains; otherwise partitions and
 * sorts the two halves in parallel sections, splitting the thread budget
 * between them.  Relies on nested parallelism being enabled (see main).
 */
void quick_sort (int p, int r, int *data, int low_limit, int threads) {
if (p < r) {
if ((r - p) < low_limit || threads == 1) {
/* small slice or thread budget exhausted: sort sequentially */
seq_quick_sort(p, r, data);
} else {
int q = partition(p, r, data);
/* each section gets private copies of the bounds; the two halves
   [p, q-1] and [q+1, r] are disjoint, so no races on data */
#pragma omp parallel sections firstprivate(data, p, q, r)
{
#pragma omp section
quick_sort(p, q - 1, data, low_limit, threads/2);
#pragma omp section
quick_sort(q + 1, r, data, low_limit, threads - threads/2);
}
}
}
}
/*
 * validate_sort: check that data[0..n-1] is in non-decreasing order,
 * printing an error line for every inversion found.  The array is not
 * modified.
 */
void validate_sort (int n, int *data){
  int idx;
  for (idx = 0; idx + 1 < n; idx++) {
    if (data[idx] > data[idx + 1]) {
      printf ("ERROR: Validate failed\n");
    }
  }
}
/*
 * insertion_sort: in-place, stable insertion sort of a[0..size-1].
 * Elements are shifted right until the insertion point for the current
 * key is found (stable: equal keys keep their relative order).
 */
void insertion_sort(int a[], int size) {
  int pos;
  for (pos = 1; pos < size; pos++) {
    int key = a[pos];
    int scan = pos - 1;
    while (scan >= 0 && a[scan] > key) {
      a[scan + 1] = a[scan];
      scan--;
    }
    a[scan + 1] = key;
  }
}
/*
 * main: parse "num_elems threshold num_threads" from argv, fill an array
 * with pseudo-random values (rand() is deliberately unseeded, so input is
 * reproducible), sort it with the parallel quicksort, print the elapsed
 * wall time, and validate the result.
 * Returns 0 on success, 1 on usage or allocation error.
 */
int main (int argc, char *argv[]){
  int i, n, low_limit;
  int *data;
  double start, end;
  if (argc != 4) {
    printf ("./sections num_elems threshold num_threads\n");
    return 1;
  }
  n = atoi(argv[1]);
  low_limit = atoi(argv[2]);
  int threads = atoi(argv[3]);            /* requested number of threads */
  int processors = omp_get_num_procs();   /* available processors */
  /* the sections-based sort relies on nested parallel regions */
  omp_set_nested(1);
  if (omp_get_nested() != 1) {
    puts("Warning: Nested parallelism desired but unavailable");
  }
  if (threads > processors) {
    printf("Warning: %d threads requested, will run_omp on %d processors available\n",threads, processors);
  }
  /* generate the array */
  data = (int *)malloc (sizeof (int) * n);
  if (data == NULL) {
    /* FIX: the original never checked the allocation before writing */
    fprintf (stderr, "Error: cannot allocate %d elements\n", n);
    return 1;
  }
  for ( i=0; i<n; i++ ) {
    data[i] = rand();
  }
  start = omp_get_wtime();
  quick_sort (0, n - 1, &data[0], low_limit, threads);
  end = omp_get_wtime();
  printf("%.4f\n", end - start);
  validate_sort (n, &data[0]);
  free (data);
  return 0;
}
/* ctl_pager.c */
/********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : ctl_pager.c
* Description : pager control
*
* + This is part of libaroma, an embedded ui toolkit.
* + 27/03/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_ctl_pager_c__
#define __libaroma_ctl_pager_c__
#include <aroma_internal.h>
#include "../ui/ui_internal.h"
#define _LIBAROMA_CTL_PAGER_HOLD_TIMING 300
#define _LIBAROMA_CTL_PAGER_ANI_TIMING 500
#define _LIBAROMA_CTL_PAGER_TOUCH_CLIENT_WAIT 120
/* HANDLER */
dword _libaroma_ctl_pager_msg(LIBAROMA_CONTROLP, LIBAROMA_MSGP);
void _libaroma_ctl_pager_draw (LIBAROMA_CONTROLP, LIBAROMA_CANVASP);
void _libaroma_ctl_pager_destroy(LIBAROMA_CONTROLP);
byte _libaroma_ctl_pager_thread(LIBAROMA_CONTROLP);
static LIBAROMA_CONTROL_HANDLER _libaroma_ctl_pager_handler={
message:_libaroma_ctl_pager_msg,
draw:_libaroma_ctl_pager_draw,
focus:NULL,
destroy:_libaroma_ctl_pager_destroy,
thread:_libaroma_ctl_pager_thread
};
/* window handler */
byte _libaroma_ctl_pager_window_invalidate(LIBAROMA_WINDOWP win, byte sync);
byte _libaroma_ctl_pager_window_sync(LIBAROMA_WINDOWP win,
int x,int y,int w,int h);
byte _libaroma_ctl_pager_window_updatebg(LIBAROMA_WINDOWP win);
/*
byte _libaroma_ctl_pager_window_control_draw_flush(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl,
LIBAROMA_CANVASP canvas,byte sync
);
byte _libaroma_ctl_pager_window_control_erasebg(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl,
LIBAROMA_CANVASP canvas
);
*/
byte _libaroma_ctl_pager_window_control_isvisible(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
);
LIBAROMA_CANVASP _libaroma_ctl_pager_window_control_draw_begin(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
);
static LIBAROMA_WINDOW_HANDLER _libaroma_ctl_pager_win_handler={
prefree:NULL,
postfree:NULL,
updatebg:_libaroma_ctl_pager_window_updatebg,
invalidate:_libaroma_ctl_pager_window_invalidate,
sync:_libaroma_ctl_pager_window_sync,
message_hooker:NULL,
control_draw_flush:NULL /*_libaroma_ctl_pager_window_control_draw_flush*/,
control_erasebg:NULL /*_libaroma_ctl_pager_window_control_erasebg*/,
control_isvisible:_libaroma_ctl_pager_window_control_isvisible,
control_draw_begin:_libaroma_ctl_pager_window_control_draw_begin
};
/*
* Structure : __LIBAROMA_CTL_PAGER
* Typedef : _LIBAROMA_CTL_PAGER, * _LIBAROMA_CTL_PAGERP
* Descriptions: button control internal structure
*/
typedef struct __LIBAROMA_CTL_PAGER _LIBAROMA_CTL_PAGER;
typedef struct __LIBAROMA_CTL_PAGER * _LIBAROMA_CTL_PAGERP;
struct __LIBAROMA_CTL_PAGER{
LIBAROMA_WINDOWP win;
int pagen;
int page_position;
long scroll_target_start;
int scroll_target_from_x;
int scroll_x;
int req_scroll_x;
int max_scroll_x;
byte allow_scroll;
int touch_x;
int touch_y;
long client_touch_start;
LIBAROMA_MSG pretouched_msg;
LIBAROMA_CONTROLP pretouched;
int scroll_duration;
LIBAROMA_FLING fling;
byte redraw;
LIBAROMA_MUTEX mutex;
byte on_direct_canvas;
byte need_direct_canvas;
LIBAROMA_CTL_PAGER_CONTROLLERP controller;
};
/*
* Function : _libaroma_ctl_pager_direct_canvas
* Return Value: byte
* Descriptions: set as direct canvas
*/
/* Switch the pager between "direct canvas" mode (children paint straight
   into the window canvas) and buffered mode.  When leaving direct mode the
   current control canvas is copied back into the window canvas at the
   current scroll offset.  Locking is intentionally left to the caller
   (see the commented-out lock calls): every caller in this file already
   holds me->mutex when invoking this function. */
byte _libaroma_ctl_pager_direct_canvas(LIBAROMA_CONTROLP ctl, byte state){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
//libaroma_mutex_lock(me->mutex);
int xt = me->scroll_x; // me->page_position * ctl->w;
if (state){
if (!me->on_direct_canvas){
me->on_direct_canvas=1;
}
}
else{
if (me->on_direct_canvas){
/* flush the visible control area back into the window canvas */
LIBAROMA_CANVASP ccv = libaroma_control_draw_begin(ctl);
if (ccv) {
// libaroma_draw_ex(me->win->dc,ccv,0,0,xt,0,ccv->w,ccv->h,0,0xff);
libaroma_draw_ex(me->win->dc,ccv,xt,0,0,0,ccv->w,ccv->h,0,0xff);
libaroma_canvas_free(ccv);
}
me->on_direct_canvas=0;
}
}
//libaroma_mutex_unlock(me->mutex);
return 1;
} /* End of _libaroma_ctl_pager_direct_canvas */
/*
* Function : _libaroma_ctl_pager_window_invalidate
* Return Value: byte
* Descriptions: window invalidate
*/
/* Repaint the whole pager window: blit the cached background into the
   window canvas, redraw every child (optionally in parallel), and — when
   sync is requested — mark the control dirty via the sync handler.
   Returns 0 when the window does not belong to this pager. */
byte _libaroma_ctl_pager_window_invalidate(LIBAROMA_WINDOWP win, byte sync){
LIBAROMA_CONTROLP ctl=(LIBAROMA_CONTROLP) win->client_data;
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
if (me->win!=win){
return 0;
}
if ((me->win->dc)&&(me->win->bg)){
libaroma_draw(me->win->dc,me->win->bg,0,0,0);
/* draw childs */
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<win->childn;i++){
/* draw no sync */
libaroma_control_draw(win->childs[i], 0);
}
}
if (sync){
return _libaroma_ctl_pager_window_sync(win,0,0,win->w,win->h);
}
return 1;
} /* End of _libaroma_ctl_pager_window_invalidate */
/*
* Function : _libaroma_ctl_pager_window_sync
* Return Value: byte
* Descriptions: window sync
*/
/* Mark the pager dirty so the control thread repaints on its next tick.
   The x/y/w/h dirty rectangle is ignored: the pager always repaints in
   full.  Returns 0 when the window does not belong to this pager. */
byte _libaroma_ctl_pager_window_sync(LIBAROMA_WINDOWP win,
int x,int y,int w,int h){
LIBAROMA_CONTROLP ctl=(LIBAROMA_CONTROLP) win->client_data;
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
if (me->win!=win){
return 0;
}
me->redraw=1;
return 1;
} /* End of _libaroma_ctl_pager_window_sync */
/*
* Function : _libaroma_ctl_pager_window_control_isvisible
* Return Value: byte
* Descriptions: check if control is visible
*/
/* Visibility test for a child control: translate the child's position by
   the current horizontal scroll offset (no offset while the window is
   inactive) and reject anything fully outside the viewport. */
byte _libaroma_ctl_pager_window_control_isvisible(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
){
LIBAROMA_CONTROLP ctl=(LIBAROMA_CONTROLP) win->client_data;
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
libaroma_mutex_lock(me->mutex);
int xt = me->scroll_x; /*me->page_position * ctl->w;*/
int ww = ctl->w;
if (!win->active){
/* inactive window: compare against the full (unscrolled) window */
xt=0;
ww=win->w;
}
int sx = cctl->x-xt;
int sy = cctl->y;
libaroma_mutex_unlock(me->mutex);
if (sx+cctl->w<0){
return 0;
}
if (sx>ww){
return 0;
}
if (sy+cctl->h<0){
return 0;
}
if (sy>win->h){
return 0;
}
return 1;
} /* End of _libaroma_ctl_pager_window_control_isvisible */
/*
* Function : _libaroma_ctl_pager_window_control_draw_begin
* Return Value: LIBAROMA_CANVASP
* Descriptions: get canvas for child control
*/
/* Hand a child control a canvas to draw into.  In buffered mode this is a
   sub-area of the pager's window canvas at the child's absolute position;
   in direct-canvas mode it is a sub-area of the control's own canvas,
   offset by the current scroll position.  Returns NULL when no canvas is
   available (caller must handle it). */
LIBAROMA_CANVASP _libaroma_ctl_pager_window_control_draw_begin(
LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
){
LIBAROMA_CONTROLP ctl=(LIBAROMA_CONTROLP) win->client_data;
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, NULL
);
LIBAROMA_CANVASP c=NULL;
libaroma_mutex_lock(me->mutex);
if (!me->on_direct_canvas){
if (win->dc==NULL){
libaroma_mutex_unlock(me->mutex);
return NULL;
}
c = libaroma_canvas_area(
win->dc,
cctl->x, cctl->y, cctl->w, cctl->h
);
}
else{
/* direct mode: map child coordinates into the visible control area */
int xt = me->scroll_x;
int x = cctl->x - xt;
int y = cctl->y;
int w = cctl->w;
int h = cctl->h;
LIBAROMA_CANVASP ccv = libaroma_control_draw_begin(ctl);
if (ccv){
if ((ccv->w>x)&&(ccv->h>y)){
c = libaroma_canvas_area(
ccv,x,y,w,h
);
}
libaroma_canvas_free(ccv);
}
else{
libaroma_mutex_unlock(me->mutex);
return NULL;
}
}
libaroma_mutex_unlock(me->mutex);
return c;
} /* End of _libaroma_ctl_pager_window_control_draw_begin */
/*
* Function : _libaroma_ctl_pager_window_updatebg
* Return Value: byte
* Descriptions: window update background
*/
/* (Re)create the window background canvas at the window's current size
   and fill it with the theme's window background color.  Reuses the
   existing canvas when the size is unchanged.  Returns 0 when the window
   does not belong to this pager.
   NOTE(review): libaroma_canvas() result is not checked before use in
   libaroma_canvas_setcolor — presumably it aborts or never fails here;
   verify against the canvas implementation. */
byte _libaroma_ctl_pager_window_updatebg(LIBAROMA_WINDOWP win){
LIBAROMA_CONTROLP ctl=(LIBAROMA_CONTROLP) win->client_data;
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
if (me->win!=win){
return 0;
}
int w = win->w;
int h = win->h;
/* draw background */
if (win->bg!=NULL){
if ((win->bg->w==w)&&(win->bg->h==h)){
/* not need recreate background */
return 1;
}
libaroma_canvas_free(win->bg);
}
win->bg = libaroma_canvas(w,h);
libaroma_canvas_setcolor(
win->bg,
libaroma_colorget(ctl,NULL)->window_bg,
0xff
);
return 1;
} /* End of _libaroma_ctl_pager_window_updatebg */
/*
* Function : _libaroma_ctl_pager_draw
* Return Value: void
* Descriptions: draw callback
*/
/* Control draw callback: copy the visible slice of the window canvas
   (at the current scroll offset) into the control canvas, or erase the
   background when no window canvas exists.  Also re-enters direct-canvas
   mode when a previous operation requested it. */
void _libaroma_ctl_pager_draw(
LIBAROMA_CONTROLP ctl,
LIBAROMA_CANVASP c){
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP,
);
if (!me->win->active){
if (!me->redraw){
/* inactive window with clean state: refresh the buffer first */
_libaroma_ctl_pager_window_invalidate(me->win,0);
}
}
/* draw window canvas */
libaroma_mutex_lock(me->mutex);
if (!me->on_direct_canvas){
if (me->win->dc){
libaroma_draw_ex(c,me->win->dc,0,0,me->scroll_x,0,
c->w,c->h,0,0xff);
}
else{
/* just erase background */
libaroma_control_erasebg(ctl,c);
}
/* need revert to direct canvas */
if (me->need_direct_canvas){
me->need_direct_canvas=0;
//libaroma_mutex_unlock(me->mutex);
_libaroma_ctl_pager_direct_canvas(ctl, 1);
//libaroma_mutex_lock(me->mutex);
}
}
libaroma_mutex_unlock(me->mutex);
me->redraw=0;
} /* End of _libaroma_ctl_pager_draw */
/*
* Function : _libaroma_ctl_pager_thread
* Return Value: byte
* Descriptions: control thread callback
*/
/* Control thread tick.  Runs two jobs (optionally as parallel OpenMP
   sections): (1) pager state — release a held "pretouched" press to the
   child after the hold delay, step any directly-requested scroll toward
   its target, and animate the fling/page-snap scroll; (2) forward the
   thread tick to every child control and redraw the ones that changed.
   Returns nonzero when the control needs redrawing. */
byte _libaroma_ctl_pager_thread(LIBAROMA_CONTROLP ctl) {
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
byte is_draw = me->redraw;
if (me->win->active==1){
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel sections
{
#pragma omp section
{
#endif
/* pretouched: forward a held-back press once the hold delay expires */
libaroma_mutex_lock(me->mutex);
if ((me->client_touch_start!=0)&&
(libaroma_tick()-me->client_touch_start>
_LIBAROMA_CTL_PAGER_TOUCH_CLIENT_WAIT)){
me->client_touch_start=0;
if (me->pretouched!=NULL){
me->win->touched=me->pretouched;
me->pretouched=NULL;
if (me->win->touched->handler->message){
me->win->touched->handler->message(
me->win->touched,&me->pretouched_msg);
}
}
}
if (me->req_scroll_x!=-1){
/* direct request: ease scroll_x toward req_scroll_x (~25% per tick,
   minimum step of one pixel) */
if (me->req_scroll_x!=me->scroll_x){
int move_sz = ((me->req_scroll_x-me->scroll_x)*64)>>8;
if (abs(move_sz)<2){
if (move_sz<0){
move_sz=-1;
}
else{
move_sz=1;
}
}
int target_sz = me->scroll_x+move_sz;
if (target_sz==me->req_scroll_x){
target_sz=me->req_scroll_x;
me->req_scroll_x=-1;
}
me->scroll_x=target_sz;
is_draw=1;
/* onscroll controller message (mutex released around the callback
   to avoid re-entry deadlocks) */
if (me->controller){
if ((me->controller->pager==ctl)&&
(me->controller->controller)) {
if (me->controller->handler){
if (me->controller->handler->onscroll){
int scrollx=me->scroll_x;
libaroma_mutex_unlock(me->mutex);
me->controller->handler->onscroll(
me->controller->controller,
me->controller->pager,
scrollx,
me->win->w,
ctl->w,
me->page_position
);
libaroma_mutex_lock(me->mutex);
}
}
}
}
}
else{
me->req_scroll_x=-1;
}
}
libaroma_mutex_unlock(me->mutex);
/* fling: animate the snap to the target page with a fluid-motion curve */
if (me->scroll_target_start>0){
me->req_scroll_x=-1;
int xt = me->page_position * ctl->w;
int dxt= (xt - me->scroll_target_from_x);
float state = libaroma_control_state(
me->scroll_target_start,
MAX(100,me->scroll_duration)
);
state = libaroma_motion_fluid(state);
int difxt = dxt * (1.0-state);
libaroma_mutex_lock(me->mutex);
me->scroll_x = xt-difxt;
libaroma_mutex_unlock(me->mutex);
/* onscroll controller message */
if (me->controller){
if ((me->controller->pager==ctl)&&(me->controller->controller)) {
if (me->controller->handler){
if (me->controller->handler->onscroll){
me->controller->handler->onscroll(
me->controller->controller,
me->controller->pager,
me->scroll_x,
me->win->w,
ctl->w,
me->page_position
);
}
}
}
}
if (state>=1.0){
/* animation finished: lock onto the page and restore direct canvas */
libaroma_mutex_lock(me->mutex);
me->scroll_x = xt;
me->scroll_target_start=0;
me->need_direct_canvas=1;
libaroma_mutex_unlock(me->mutex);
/* onscroll finish controller message */
if (me->controller){
if ((me->controller->pager==ctl)&&(me->controller->controller)) {
if (me->controller->handler){
if (me->controller->handler->onscroll_finish){
me->controller->handler->onscroll_finish(
me->controller->controller,
me->controller->pager,
me->page_position
);
}
}
}
}
}
is_draw=1;
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
#pragma omp section
{
#endif
/* tick every child; redraw the ones whose thread reports a change */
int i;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (i=0;i<me->win->childn;i++){
LIBAROMA_CONTROLP c=me->win->childs[i];
if (c->handler->thread!=NULL){
if (c->handler->thread(c)){
if (libaroma_control_draw(c,0)){
is_draw=1;
}
}
}
}
#ifdef LIBAROMA_CONFIG_OPENMP
}
}
#endif
}
return is_draw;
} /* End of _libaroma_ctl_pager_thread */
/*
* Function : _libaroma_ctl_pager_destroy
* Return Value: void
* Descriptions: destroy callback
*/
/* Destroy callback: detach this pager from its controller (so the
   controller does not keep a dangling pointer), then free the inner
   window, the mutex, and the private data block. */
void _libaroma_ctl_pager_destroy(
LIBAROMA_CONTROLP ctl){
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP,
);
if (me->controller){
if (me->controller->pager==ctl){
me->controller->pager=NULL;
}
}
libaroma_window_free(me->win);
libaroma_mutex_free(me->mutex);
free(me);
} /* End of _libaroma_ctl_pager_destroy */
/*
* Function : _libaroma_ctl_pager_msg
* Return Value: byte
* Descriptions: message callback
*/
/*
 * Message handler for the pager control.
 * Routes window lifecycle messages (active/inactive/resize/measured) to
 * the child controls, and implements the touch state machine: a press is
 * held back ("pretouched") for a short delay so that a horizontal drag
 * can become a page scroll instead of a client touch.
 *
 * FIX (touch-up branch): the original tested
 *   if (me->client_touch_start || me->pretouched)
 * and then dereferenced me->pretouched unconditionally — a NULL
 * dereference whenever only client_touch_start was still set.  A pending
 * press can only be forwarded when pretouched is non-NULL, so the guard
 * is now exactly that.
 */
dword _libaroma_ctl_pager_msg(
LIBAROMA_CONTROLP ctl,
LIBAROMA_MSGP msg){
/* internal check */
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
dword ret = 0;
LIBAROMA_WINDOWP win = me->win;
switch(msg->msg){
case LIBAROMA_MSG_WIN_ACTIVE:
{
/* activate once: forward to children, then request direct canvas */
if (!win->active){
int i;
win->active=1;
for (i=0;i<win->childn;i++){
if (win->childs[i]->handler->message){
win->childs[i]->handler->message(win->childs[i], msg);
}
}
libaroma_mutex_lock(me->mutex);
me->need_direct_canvas=1;
libaroma_mutex_unlock(me->mutex);
}
}
break;
case LIBAROMA_MSG_WIN_RESIZE:
{
/* forward resize to all children */
int i;
for (i=0;i<win->childn;i++){
if (win->childs[i]->handler->message){
win->childs[i]->handler->message(win->childs[i], msg);
}
}
}
break;
case LIBAROMA_MSG_WIN_INACTIVE:
{
/* deactivate: flush direct canvas first, then notify children */
if (win->active){
libaroma_mutex_lock(me->mutex);
_libaroma_ctl_pager_direct_canvas(ctl, 0);
libaroma_mutex_unlock(me->mutex);
win->active=0;
int i;
for (i=0;i<win->childn;i++){
if (win->childs[i]->handler->message){
win->childs[i]->handler->message(win->childs[i], msg);
}
}
}
}
break;
case LIBAROMA_MSG_WIN_MEASURED:
{
/* size the inner window to pagen pages side by side and (re)create
   its canvas, then remeasure every child */
libaroma_mutex_lock(me->mutex);
win->x = 0;
win->y = 0;
win->ax=ctl->x;
win->ay=ctl->y;
win->w = ctl->w*me->pagen;
win->h = ctl->h;
me->max_scroll_x = win->w-ctl->w;
if (win->dc){
if ((win->dc->w!=win->w)||(win->dc->h!=win->h)){
libaroma_canvas_free(win->dc);
win->dc=NULL;
}
}
if (!win->dc){
win->dc = libaroma_canvas(
win->w,
win->h
);
}
libaroma_mutex_unlock(me->mutex);
_libaroma_ctl_pager_window_updatebg(win);
/* remeasured all childs */
int i;
for (i=0;i<win->childn;i++){
libaroma_window_measure(win,win->childs[i]);
}
}
break;
case LIBAROMA_MSG_TOUCH:
{
/* translate to control-local coordinates; xt is the page offset */
int x = msg->x;
int y = msg->y;
libaroma_window_calculate_pos(NULL,ctl,&x,&y);
int xt=me->page_position * ctl->w;
msg->x = x;
msg->y = y;
/* touch handler */
if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){
/* DOWN: remember the press and the child under it, but do not
   deliver yet — the thread callback releases it after the hold
   delay unless a scroll gesture claims it first */
libaroma_mutex_lock(me->mutex);
memcpy(&me->pretouched_msg,msg,sizeof(LIBAROMA_MSG));
me->pretouched_msg.x=x+xt;
win->touched = NULL;
me->pretouched=NULL;
libaroma_mutex_unlock(me->mutex);
if (me->scroll_target_start==0){
int i;
for (i=0;i<win->childn;i++){
if (_libaroma_window_is_inside(win->childs[i],x+xt,y)){
libaroma_mutex_lock(me->mutex);
me->pretouched = win->childs[i];
libaroma_mutex_unlock(me->mutex);
break;
}
}
}
libaroma_mutex_lock(me->mutex);
if (me->pretouched!=NULL){
if (me->pretouched->handler->message){
if (me->max_scroll_x>0){
me->client_touch_start=msg->sent; /*libaroma_tick();*/
}
else{
/* nothing to scroll: make the hold delay expire immediately */
me->client_touch_start=msg->sent-
_LIBAROMA_CTL_PAGER_TOUCH_CLIENT_WAIT;
/*libaroma_tick()-_LIBAROMA_CTL_PAGER_TOUCH_CLIENT_WAIT;*/
}
}
else{
me->pretouched=NULL;
}
}
if (me->scroll_target_start==0){
/* allow_scroll: 2 = undecided (drag direction pending), 1 = yes */
if (me->max_scroll_x>0){
me->allow_scroll=2;
}
else{
me->allow_scroll=1;
}
}
else{
/* press during a snap animation: cancel it and keep scrolling */
me->scroll_target_start=0;
me->allow_scroll=1;
}
me->touch_x=x;
me->touch_y=y;
libaroma_fling_down(&me->fling, x);
libaroma_mutex_unlock(me->mutex);
}
else if (win->touched!=NULL){
/* a child already owns this touch: forward move/up to it */
x+=xt;
msg->x=x;
if (win->touched->handler->message){
ret=win->touched->handler->message(win->touched, msg);
}
if (msg->state==LIBAROMA_HID_EV_STATE_UP){
win->touched=NULL;
}
}
else{
if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){
libaroma_mutex_lock(me->mutex);
if (me->max_scroll_x>0){
byte is_first_scroll=0;
if (me->allow_scroll==2){
/* direction not yet decided: vertical wins => hand the touch
   to the child, horizontal wins => start scrolling */
int move_sz = me->touch_x - x;
int move_sz_y = me->touch_y - y;
int scrdp=libaroma_dp(24);
if ((abs(move_sz_y)>=scrdp)&&(abs(move_sz_y)>=abs(move_sz))){
/* halt the scroll and send to control */
if (me->pretouched){
if (me->pretouched->handler->message){
msg->x=x+xt;
me->client_touch_start=0;
win->touched=me->pretouched;
me->pretouched=NULL;
/* send down & move message */
win->touched->handler->message(
win->touched,&me->pretouched_msg);
win->touched->handler->message(
win->touched,msg);
}
else{
me->pretouched=NULL;
}
me->client_touch_start=0;
me->allow_scroll=0;
me->touch_x=x;
me->touch_y=y;
me->redraw=1;
}
}
else if (abs(move_sz)>=scrdp){
is_first_scroll=1;
me->allow_scroll=1;
me->client_touch_start=0;
me->pretouched=NULL;
win->touched=NULL;
}
}
if ((me->allow_scroll==1)&&(me->touch_x!=x)){
if (me->max_scroll_x>0){
_libaroma_ctl_pager_direct_canvas(ctl, 0);
int move_sz = me->touch_x - x;
if ((is_first_scroll)||(me->req_scroll_x!=-1)){
/* route through req_scroll_x so the thread eases the jump */
if (me->req_scroll_x==-1){
me->req_scroll_x=me->scroll_x;
}
int scroll_x = me->req_scroll_x + move_sz;
if (scroll_x<0){
scroll_x=0;
}
if (scroll_x>me->max_scroll_x){
scroll_x=me->max_scroll_x;
}
me->req_scroll_x=scroll_x;
}
else{
/* continuous drag: move scroll_x directly, clamped */
int scroll_x = me->scroll_x + move_sz;
if (scroll_x<0){
scroll_x=0;
}
if (scroll_x>me->max_scroll_x){
scroll_x=me->max_scroll_x;
}
if (scroll_x!=me->scroll_x){
me->scroll_x=scroll_x;
/* onscroll controller message */
if (me->controller){
if ((me->controller->pager==ctl)&&
(me->controller->controller)) {
if (me->controller->handler){
if (me->controller->handler->onscroll){
me->controller->handler->onscroll(
me->controller->controller,
me->controller->pager,
scroll_x,
me->win->w,
ctl->w,
me->page_position
);
}
}
}
}
me->redraw=1;
}
}
me->touch_x=x;
libaroma_fling_move(&me->fling, x);
}
}
}
libaroma_mutex_unlock(me->mutex);
}
else if (msg->state==LIBAROMA_HID_EV_STATE_UP){
libaroma_mutex_lock(me->mutex);
if (me->allow_scroll){
/* decide the page to snap to from the fling velocity and the
   direction the view has already been dragged */
int vel = libaroma_fling_up(&me->fling, x);
int target_x = me->page_position;
double velocity = 0;
if (vel) {
if (me->page_position*ctl->w<me->scroll_x){
if (vel>0){
target_x++;
velocity = ((float) vel) / 800.0;
}
}
else if (me->page_position*ctl->w>me->scroll_x){
if (vel<0){
target_x--;
velocity = ((float) vel) / 800.0;
}
}
}
/* unlock around the call: set_active_page takes the mutex itself */
libaroma_mutex_unlock(me->mutex);
libaroma_ctl_pager_set_active_page(
ctl, target_x, velocity
);
libaroma_mutex_lock(me->mutex);
}
/* FIX: was (me->client_touch_start||me->pretouched), which could
   dereference a NULL pretouched below */
if (me->pretouched!=NULL){
if (me->pretouched->handler->message){
msg->x=x+xt;
/* send down & up message */
me->pretouched->handler->message(
me->pretouched,&me->pretouched_msg);
me->pretouched->handler->message(
me->pretouched,msg);
}
}
me->client_touch_start=0;
me->pretouched=NULL;
me->touch_x=x;
me->touch_y=y;
me->redraw=1;
libaroma_mutex_unlock(me->mutex);
}
}
}
break;
}
return ret;
} /* End of _libaroma_ctl_pager_msg */
/*
* Function : libaroma_ctl_pager_set_active_page
* Return Value: byte
* Descriptions: set active page
*/
/* Start an animated scroll to page_id.  The duration is derived from the
   fling velocity when one is given, otherwise from the distance (longer
   slides take longer, clamped to [100, 600] ms).  Returns 0 for an
   out-of-range page, 1 otherwise (including the no-op case where the
   view is already at the target offset). */
byte libaroma_ctl_pager_set_active_page(
LIBAROMA_CONTROLP ctl, int page_id, double velocity){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
);
if ((page_id<0)||(page_id>=me->pagen)){
return 0;
}
libaroma_mutex_lock(me->mutex);
/* calculate slide duration */
int dx = (page_id * ctl->w) - me->scroll_x;
if (dx!=0) {
int width = ctl->w;
int halfWidth = width / 2;
/* ease the distance ratio through a sine so short slides still get
   a perceptible duration */
float distanceRatio = MIN(1.0, 1.0 * abs(dx) / width);
distanceRatio -= 0.5;
distanceRatio *= 0.3;
distanceRatio = sin(distanceRatio);
float distance = halfWidth + halfWidth * distanceRatio;
int duration = 0;
if (velocity > 0) {
duration = round(15 * fabs(distance/velocity));
}
else{
float pageWidth = width;
float pageDelta = abs(dx)/pageWidth;
duration = (int) ((pageDelta + 1) * 300);
}
duration = MAX(100,MIN(duration, 600));
/* leave direct-canvas mode while animating; the thread callback
   performs the actual scroll interpolation */
_libaroma_ctl_pager_direct_canvas(ctl, 0);
me->page_position = page_id;
me->scroll_duration = duration;
me->scroll_target_from_x = me->scroll_x;
me->scroll_target_start=libaroma_tick();
}
libaroma_mutex_unlock(me->mutex);
return 1;
} /* End of libaroma_ctl_pager_set_active_page */
/*
* Function : libaroma_ctl_pager_get_window
* Return Value: LIBAROMA_WINDOWP
* Descriptions: get window
*/
/* Return the pager's inner window (NULL if ctl is not a pager). */
LIBAROMA_WINDOWP libaroma_ctl_pager_get_window(LIBAROMA_CONTROLP ctl){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, NULL
);
return me->win;
} /* End of libaroma_ctl_pager_get_window */
/*
* Function : libaroma_ctl_pager_get_active_page
* Return Value: int
* Descriptions: get active page index
*/
/* Return the current page index (-1 if ctl is not a pager). */
int libaroma_ctl_pager_get_active_page(LIBAROMA_CONTROLP ctl){
_LIBAROMA_CTL_CHECK(
_libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, -1
);
return me->page_position;
} /* End of libaroma_ctl_pager_get_active_page */
/*
* Function : libaroma_ctl_pager_get_pages
* Return Value: int
* Descriptions: get number of pages
*/
int libaroma_ctl_pager_get_pages(LIBAROMA_CONTROLP ctl){
  /* validate the control and bind `me`; returns 0 on handler mismatch */
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
  );
  /* total number of pages configured at creation time */
  return me->pagen;
} /* End of libaroma_ctl_pager_get_pages */
/*
* Function : libaroma_ctl_pager_set_controller
* Return Value: byte
* Descriptions: set tab controller
*/
byte libaroma_ctl_pager_set_controller(
    LIBAROMA_CONTROLP ctl,
    LIBAROMA_CTL_PAGER_CONTROLLERP controller){
  _LIBAROMA_CTL_CHECK(
    _libaroma_ctl_pager_handler, _LIBAROMA_CTL_PAGERP, 0
  );
  libaroma_mutex_lock(me->mutex);
  /* detach the previous controller if it still points back at us */
  if ((me->controller)&&(me->controller->pager==ctl)){
    me->controller->pager=NULL;
  }
  /* install the new controller (may be NULL to clear) and make its
     back-reference point at this pager */
  me->controller = controller;
  if ((controller)&&(controller->pager!=ctl)){
    controller->pager=ctl;
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of libaroma_ctl_pager_set_controller */
/*
* Function : libaroma_ctl_pager
* Return Value: LIBAROMA_CONTROLP
* Descriptions: create button control
*/
LIBAROMA_CONTROLP libaroma_ctl_pager(
  LIBAROMA_WINDOWP win,
  word id, int pager_number,
  int x, int y, int w, int h
){
  /* Create a pager control with `pager_number` pages and attach it to
     `win`. Returns NULL when `win` is NULL or any allocation fails. */
  if (!win){
    ALOGW("pager need direct window attach");
    return NULL;
  }

  /* init internal data (calloc takes count first, then element size) */
  _LIBAROMA_CTL_PAGERP me = (_LIBAROMA_CTL_PAGERP)
    calloc(1, sizeof(_LIBAROMA_CTL_PAGER));
  if (!me){
    ALOGW("libaroma_ctl_pager alloc pager memory failed");
    return NULL;
  }
  me->pagen=pager_number;
  me->req_scroll_x=-1;   /* -1 = no pending scroll request */

  /* internal client window that hosts the page contents */
  me->win = (LIBAROMA_WINDOWP) calloc(1, sizeof(LIBAROMA_WINDOW));
  if (!me->win){
    ALOGW("libaroma_ctl_pager alloc window data failed");
    free(me);
    return NULL;
  }
  me->win->handler=&_libaroma_ctl_pager_win_handler;
  me->win->parent=win;

  /* init control */
  LIBAROMA_CONTROLP ctl =
    libaroma_control_new(
      id,
      x, y, w, h,
      libaroma_dp(48),libaroma_dp(48), /* min size */
      (voidp) me,
      &_libaroma_ctl_pager_handler,
      NULL
    );
  if (!ctl){
    free(me->win);
    free(me);
    return NULL;
  }
  me->win->client_data=ctl;
  libaroma_mutex_init(me->mutex);
  /* NOTE(review): if libaroma_window_attach fails it returns NULL and the
     control/mutex are not released here -- confirm attach cannot fail for
     a freshly created control. */
  return libaroma_window_attach(win,ctl);
} /* End of libaroma_ctl_pager */
#endif /* __libaroma_ctl_pager_c__ */
|
stepper.c | #include "stepper.h"
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#include <omp.h>
//ldoc on
/**
* ## Implementation
*
* ### Structure allocation
*/
/**
 * Allocate and initialize a simulation over a w-by-h domain discretized
 * on an nx-by-ny grid with `nfield` solution components.
 * Returns NULL on allocation failure (the original ignored malloc errors).
 * The caller releases the result with central2d_free.
 */
central2d_t* central2d_init(float w, float h, int nx, int ny,
                            int nfield, flux_t flux, speed_t speed,
                            float cfl)
{
    // We extend to a four cell buffer to avoid BC comm on odd time steps
    int ng = 4;

    central2d_t* sim = (central2d_t*) malloc(sizeof(central2d_t));
    if (!sim)
        return NULL;
    sim->nx = nx;
    sim->ny = ny;
    sim->ng = ng;
    sim->nfield = nfield;
    sim->dx = w/nx;
    sim->dy = h/ny;
    sim->flux = flux;
    sim->speed = speed;
    sim->cfl = cfl;

    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    int nc = nx_all * ny_all;
    int N  = nfield * nc;
    // Single backing allocation: u, v, f, g (N floats each) followed by
    // 6*nx_all floats of per-row scratch used by the corrector.
    sim->u  = (float*) malloc((4*N + 6*nx_all)* sizeof(float));
    if (!sim->u) {
        free(sim);
        return NULL;
    }
    sim->v  = sim->u + N;
    sim->f  = sim->u + 2*N;
    sim->g  = sim->u + 3*N;
    sim->scratch = sim->u + 4*N;

    return sim;
}
/** Release a simulation created by central2d_init; safe to pass NULL. */
void central2d_free(central2d_t* sim)
{
    if (!sim)
        return;           // tolerate NULL like free() does
    free(sim->u);         // v, f, g, scratch all alias into this allocation
    free(sim);
}
/* Linear index of interior cell (ix,iy) of field k, skipping the ng-wide
   ghost border on each side of the grid. */
int central2d_offset(central2d_t* sim, int k, int ix, int iy)
{
    int width  = sim->nx + 2*sim->ng;
    int height = sim->ny + 2*sim->ng;
    int row = sim->ng + iy;
    int col = sim->ng + ix;
    return (k*height + row)*width + col;
}
/**
* ### Boundary conditions
*
* In finite volume methods, boundary conditions are typically applied by
* setting appropriate values in ghost cells. For our framework, we will
* apply periodic boundary conditions; that is, waves that exit one side
* of the domain will enter from the other side.
*
* We apply the conditions by assuming that the cells with coordinates
* `nghost <= ix <= nx+nghost` and `nghost <= iy <= ny+nghost` are
* "canonical", and setting the values for all other cells `(ix,iy)`
* to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
* integers `p` and `q`.
*/
static inline
void copy_subgrid(float* restrict dst,
                  const float* restrict src,
                  int nx, int ny, int stride1, int stride2)
{
    // Copy an nx-by-ny window row by row between two strided layouts
    // (stride1 = destination row stride, stride2 = source row stride).
    for (int row = 0; row < ny; ++row) {
        float* restrict d = dst + row*stride1;
        const float* restrict s = src + row*stride2;
        for (int col = 0; col < nx; ++col)
            d[col] = s[col];
    }
}
/* Fill the ng-wide ghost borders of subdomain (px,py)'s local array `u`
 * from the global array `src`, wrapping periodically at domain edges.
 * nx,ny are the local interior size; partx,party the subdomain counts.
 * NOTE(review): the mod* offset arithmetic below (multiplying the block
 * offsets l/b by a neighbour index) encodes the periodic neighbour lookup
 * in a non-obvious way -- verify against the serial version's indexing. */
void central2d_periodic(float* restrict u, const float* restrict src,
                        int nx, int ny, int ng, int partx, int party, int px, int py, int nfield)
{
    // Stride and number per field
    int s = nx + 2*ng;                        // local row stride
    int s2 = nx*partx + 2*ng;                 // global row stride
    int field_stride = (ny+2*ng)*s;           // floats per field, local
    int field_stride2 = (ny*party+2*ng)*s2;   // floats per field, global

    // Offsets of left, right, top, and bottom data blocks and ghost blocks
    int l = nx,   lg = 0;
    int r = ng,   rg = nx+ng;
    int b = ny*s2, bg = 0;
    int t = ng*s2, tg = (nx+ng)*s;

    // Copy data into ghost cells on each side
    for (int k = 0; k < nfield; ++k) {
        float* uk = u + k*field_stride;
        const float* srck = src + k*field_stride2;
        // Periodic neighbour selectors: wrap at the first/last subdomain
        int modxl = (px == 0? partx : px);
        int modxr = (px == partx-1? 0 : px+1);
        int modyb = (py == 0? party : py);
        int modyt = (py == party-1? 0 : py+1);
        copy_subgrid(uk+lg, srck+l*modxl,   ng,      ny+2*ng, s, s2);
        copy_subgrid(uk+rg, srck+r+l*modxr, ng,      ny+2*ng, s, s2);
        copy_subgrid(uk+bg, srck+b*modyb,   nx+2*ng, ng,      s, s2);
        copy_subgrid(uk+tg, srck+t+b*modyt, nx+2*ng, ng,      s, s2);
    }
}
/**
* ### Derivatives with limiters
*
* In order to advance the time step, we also need to estimate
* derivatives of the fluxes and the solution values at each cell.
* In order to maintain stability, we apply a limiter here.
*
 * The minmod limiter *looks* like it should be expensive to compute,
* since superficially it seems to require a number of branches.
* We do something a little tricky, getting rid of the condition
* on the sign of the arguments using the `copysign` instruction.
* If the compiler does the "right" thing with `max` and `min`
* for floating point arguments (translating them to branch-free
* intrinsic operations), this implementation should be relatively fast.
*/
// Branch-free computation of minmod of two numbers times 2s
static inline
float xmin2s(float s, float a, float b) {
float sa = copysignf(s, a);
float sb = copysignf(s, b);
float abs_a = fabsf(a);
float abs_b = fabsf(b);
float min_abs = (abs_a < abs_b ? abs_a : abs_b);
return (sa+sb) * min_abs;
}
// Limited combined slope estimate
static inline
float limdiff(float um, float u0, float up) {
const float theta = 2.0;
const float quarter = 0.25;
float du1 = u0-um; // Difference to left
float du2 = up-u0; // Difference to right
float duc = up-um; // Twice centered difference
return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}
// Limited x-derivatives for a contiguous run of cells: du[i] gets the
// limited slope at u[i].  The caller guarantees u[-1] and u[ncell] are
// readable (interior cells of a padded row).
static inline
void limited_deriv1(float* restrict du,
                    const float* restrict u,
                    int ncell)
{
    for (const float* p = u; p != u + ncell; ++p, ++du)
        *du = limdiff(p[-1], p[0], p[1]);
}
// Limited derivatives across rows: like limited_deriv1 but the
// neighbours sit `stride` elements away (y-derivatives in a row-major
// grid).  The caller guarantees u[-stride] and u[ncell-1+stride] exist.
static inline
void limited_derivk(float* restrict du,
                    const float* restrict u,
                    int ncell, int stride)
{
    assert(stride > 0);
    for (int i = 0; i < ncell; ++i) {
        const float* c = u + i;
        du[i] = limdiff(c[-stride], c[0], c[stride]);
    }
}
/**
* ### Advancing a time step
*
* Take one step of the numerical scheme. This consists of two pieces:
* a first-order corrector computed at a half time step, which is used
* to obtain new $F$ and $G$ values; and a corrector step that computes
* the solution at the full step. For full details, we refer to the
* [Jiang and Tadmor paper][jt].
*
* The `compute_step` function takes two arguments: the `io` flag
* which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
* flag, which actually determines the time step length. We need
* to know the even-vs-odd distinction because the Jiang-Tadmor
* scheme alternates between a primary grid (on even steps) and a
* staggered grid (on odd steps). This means that the data at $(i,j)$
* in an even step and the data at $(i,j)$ in an odd step represent
* values at different locations in space, offset by half a space step
* in each direction. Every other step, we shift things back by one
* mesh cell in each direction, essentially resetting to the primary
* indexing scheme.
*
* We're slightly tricky in the corrector in that we write
* $$
* v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
* $$
* where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
* update formula, and $d(i,j)$ the $y$-derivative terms. This cuts
* the arithmetic cost a little (not that it's that big to start).
* It also makes it more obvious that we only need four rows worth
* of scratch space.
*/
// Predictor half-step of the Jiang-Tadmor scheme: v = u - (dt/2)*(Fx + Gy)
// using limited derivatives of the fluxes.  Only rows/columns 1..n-2 are
// written; the outermost ring keeps stale values (it is ghost data).
static
void central2d_predict(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int nx, int ny, int nfield)
{
    // scratch holds one row of limited x-derivs of f and y-derivs of g
    float* restrict fx = scratch;
    float* restrict gy = scratch+nx;
    for (int k = 0; k < nfield; ++k) {
        for (int iy = 1; iy < ny-1; ++iy) {
            int offset = (k*ny+iy)*nx+1;
            limited_deriv1(fx+1, f+offset, nx-2);
            limited_derivk(gy+1, g+offset, nx-2, nx);
            for (int ix = 1; ix < nx-1; ++ix) {
                // inner `offset` intentionally shadows the row offset above
                int offset = (k*ny+iy)*nx+ix;
                v[offset] = u[offset] - dtcdx2 * fx[ix] - dtcdy2 * gy[ix];
            }
        }
    }
}
// Corrector helper: for one row, s[ix] accumulates the solution,
// x-derivative and x-flux terms of the update, and d[ix] the
// y-derivative and y-flux terms.  central2d_correct combines adjacent
// rows of s and d into the staggered-grid update.
static
void central2d_correct_sd(float* restrict s,
                          float* restrict d,
                          const float* restrict ux,
                          const float* restrict uy,
                          const float* restrict u,
                          const float* restrict f,
                          const float* restrict g,
                          float dtcdx2, float dtcdy2,
                          int xlo, int xhi)
{
    // restrict guarantees s/d do not alias the inputs, so fusing the two
    // original loops leaves every element's result unchanged.
    for (int ix = xlo; ix < xhi; ++ix) {
        s[ix] = 0.2500f * (u[ix]  + u[ix+1])
              + 0.0625f * (ux[ix] - ux[ix+1])
              + dtcdx2  * (f[ix]  - f[ix+1]);
        d[ix] = 0.0625f * (uy[ix] + uy[ix+1])
              + dtcdy2  * (g[ix]  + g[ix+1]);
    }
}
// Corrector step: combine the predictor output on a staggered grid,
//   v(i,j) = (s(i+1,j)+s(i,j)) - (d(i+1,j)-d(i,j)),
// where s/d are the per-row partial sums from central2d_correct_sd.
// Only two rows of s/d are live at a time; s0/d0 and s1/d1 rotate.
static
void central2d_correct(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int xlo, int xhi, int ylo, int yhi,
                       int nx, int ny, int nfield)
{
    assert(0 <= xlo && xlo < xhi && xhi <= nx);
    assert(0 <= ylo && ylo < yhi && yhi <= ny);

    // scratch layout: 6 rows of nx floats (see central2d_init's sizing)
    float* restrict ux = scratch;
    float* restrict uy = scratch +   nx;
    float* restrict s0 = scratch + 2*nx;
    float* restrict d0 = scratch + 3*nx;
    float* restrict s1 = scratch + 4*nx;
    float* restrict d1 = scratch + 5*nx;

    for (int k = 0; k < nfield; ++k) {
        float* restrict vk = v + k*ny*nx;
        const float* restrict uk = u + k*ny*nx;
        const float* restrict fk = f + k*ny*nx;
        const float* restrict gk = g + k*ny*nx;

        // Prime the pipeline with row ylo's partial sums
        limited_deriv1(ux+1, uk+ylo*nx+1, nx-2);
        limited_derivk(uy+1, uk+ylo*nx+1, nx-2, nx);
        central2d_correct_sd(s1, d1, ux, uy,
                             uk + ylo*nx, fk + ylo*nx, gk + ylo*nx,
                             dtcdx2, dtcdy2, xlo, xhi);

        for (int iy = ylo; iy < yhi; ++iy) {
            // Rotate: previous row's sums become s0/d0, compute row iy+1
            float* tmp;
            tmp = s0; s0 = s1; s1 = tmp;
            tmp = d0; d0 = d1; d1 = tmp;

            limited_deriv1(ux+1, uk+(iy+1)*nx+1, nx-2);
            limited_derivk(uy+1, uk+(iy+1)*nx+1, nx-2, nx);
            central2d_correct_sd(s1, d1, ux, uy,
                                 uk + (iy+1)*nx, fk + (iy+1)*nx, gk + (iy+1)*nx,
                                 dtcdx2, dtcdy2, xlo, xhi);

            for (int ix = xlo; ix < xhi; ++ix)
                vk[iy*nx+ix] = (s1[ix]+s0[ix])-(d1[ix]-d0[ix]);
        }
    }
}
// One step of the Jiang-Tadmor scheme on the padded nx_all-by-ny_all grid:
// evaluate fluxes, run the predictor half-step, re-evaluate fluxes at the
// half step, then apply the corrector.  `io` is the step parity: on odd
// steps the output is written shifted by one cell (v+io*(nx_all+1)) so
// the staggered grid realigns with the primary grid every two steps.
static
void central2d_step(float* restrict u, float* restrict v,
                    float* restrict scratch,
                    float* restrict f,
                    float* restrict g,
                    int io, int nx, int ny, int ng,
                    int nfield, flux_t flux, speed_t speed,
                    float dt, float dx, float dy)
{
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;

    // 0.5*dt/dx and 0.5*dt/dy: half-step CFL factors
    float dtcdx2 = 0.5 * dt / dx;
    float dtcdy2 = 0.5 * dt / dy;

    flux(f, g, u, nx_all * ny_all, nx_all * ny_all);

    central2d_predict(v, scratch, u, f, g, dtcdx2, dtcdy2,
                      nx_all, ny_all, nfield);

    // Flux values of f and g at half step (skip the outermost ring,
    // which the predictor did not update)
    for (int iy = 1; iy < ny_all-1; ++iy) {
        int jj = iy*nx_all+1;
        flux(f+jj, g+jj, v+jj, nx_all-2, nx_all * ny_all);
    }

    central2d_correct(v+io*(nx_all+1), scratch, u, f, g, dtcdx2, dtcdy2,
                      ng-io, nx+ng-io,
                      ng-io, ny+ng-io,
                      nx_all, ny_all, nfield);
}
/**
* ### Advance a fixed time
*
* The `run` method advances from time 0 (initial conditions) to time
* `tfinal`. Note that `run` can be called repeatedly; for example,
* we might want to advance for a period of time, write out a picture,
* advance more, and write another picture. In this sense, `tfinal`
* should be interpreted as an offset from the time represented by
* the simulator at the start of the call, rather than as an absolute time.
*
* We always take an even number of steps so that the solution
* at the end lives on the main grid instead of the staggered grid.
*/
/**
 * Advance the solution by `tfinal` using a fixed 2x2 OpenMP domain
 * decomposition; always takes an even number of steps so the result
 * lands on the primary grid.  Returns the number of steps taken.
 *
 * Fixes relative to the previous version:
 *  - dt was computed from cxy BEFORE speed() had ever run (cxy still held
 *    its initial epsilon), so the CFL condition was never enforced; all
 *    threads also raced on the shared cxy.  The wave-speed reduction now
 *    completes before a single thread picks dt.
 *  - py was computed as j/party, which is only correct when partx==party;
 *    it is now j/partx.
 *  - per-thread allocations are checked.
 */
static
int central2d_xrun(float* restrict u, float* restrict v,
                   float* restrict scratch,
                   float* restrict f,
                   float* restrict g,
                   int nx, int ny, int ng,
                   int nfield, flux_t flux, speed_t speed,
                   float tfinal, float dx, float dy, float cfl)
{
    int nstep = 0;
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    int c = nx_all * ny_all;

    // NOTE(review): assumes partx divides nx and party divides ny.
    int partx = 2;
    int party = 2;
    omp_set_num_threads(partx*party);
    int sx = nx/partx;
    int sy = ny/party;
    int sx_all = sx + 2*ng;
    int sy_all = sy + 2*ng;
    int pc = sx_all * sy_all;
    int pN = nfield * pc;

    bool done = false;
    float t = 0;
    float dt = 0;     // shared; chosen once per outer iteration (omp single)
    float cxy[2];     // shared max wave speeds, reduced across threads
    while (!done) {
        cxy[0] = 1.0e-15f;
        cxy[1] = 1.0e-15f;
        #pragma omp parallel
        {
            int j = omp_get_thread_num();
            int px = j % partx;   // subdomain column
            int py = j / partx;   // subdomain row (was j/party)

            // Per-thread working copies of this subdomain plus ghost border
            float* pu = (float*) malloc(pN* sizeof(float));
            float* pv = (float*) malloc(pN* sizeof(float));
            float* pf = (float*) malloc(pN* sizeof(float));
            float* pg = (float*) malloc(pN* sizeof(float));
            float* pscratch = (float*) malloc((6*sx_all)* sizeof(float));
            if (!pu || !pv || !pf || !pg || !pscratch)
                exit(1);   // no sane recovery inside a parallel region

            // Gather this thread's interior block from the global grid
            for (int k = 0; k < nfield; ++k) {
                copy_subgrid(pu+k*pc+ng*sx_all+ng,
                             u+k*c+nx_all*(ng+py*sy)+(ng+px*sx),
                             sx, sy, sx_all, nx_all);
            }
            #pragma omp barrier
            central2d_periodic(pu, u, sx, sy, ng, partx, party, px, py, nfield);

            // Reduce local wave speeds into the shared cxy, then let one
            // thread derive dt from the completed reduction.
            float lcxy[2] = {1.0e-15f, 1.0e-15f};
            speed(lcxy, pu, sx_all * sy_all, sx_all * sy_all);
            #pragma omp critical
            {
                cxy[0] = fmaxf(cxy[0], lcxy[0]);
                cxy[1] = fmaxf(cxy[1], lcxy[1]);
            }
            #pragma omp barrier
            #pragma omp single
            {
                dt = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
                if (t + 2*dt >= tfinal) {
                    dt = (tfinal-t)/2;   // land exactly on tfinal
                    done = true;
                }
            }   // implicit barrier: all threads see the final dt

            // Even step on the expanded buffer (ng-2 trick avoids a ghost
            // exchange between the two half-steps), odd step realigns to
            // the primary grid.
            central2d_step(pu, pv, pscratch, pf, pg,
                           0, sx+4, sy+4, ng-2,
                           nfield, flux, speed,
                           dt, dx, dy);
            central2d_step(pv, pu, pscratch, pf, pg,
                           1, sx, sy, ng,
                           nfield, flux, speed,
                           dt, dx, dy);
            #pragma omp barrier

            // Scatter the updated interior back to the global grid
            for (int k = 0; k < nfield; ++k) {
                copy_subgrid(u+k*c+nx_all*(ng+py*sy)+(ng+px*sx),
                             pu+k*pc+ng*sx_all+ng,
                             sx, sy, sx_all, nx_all);
            }
            free(pscratch);
            free(pg);
            free(pf);
            free(pv);
            free(pu);
        }
        t += 2*dt;
        nstep += 2;
    }
    return nstep;
}
/* Public entry point: advance `sim` by `tfinal` (an offset from the
 * current simulated time, not an absolute time) and return the number
 * of steps taken. */
int central2d_run(central2d_t* sim, float tfinal)
{
    return central2d_xrun(sim->u, sim->v, sim->scratch,
                          sim->f, sim->g,
                          sim->nx, sim->ny, sim->ng,
                          sim->nfield, sim->flux, sim->speed,
                          tfinal, sim->dx, sim->dy, sim->cfl);
}
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include "gather_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Per-node private state for the reference Gather implementation. */
typedef struct
{
    int* in_shape;    // cached input dims, allocated in prerun, freed in postrun
    int axis;         // gather axis
    int indices_num;  // number of indices to gather
    int dim_size;     // number of input dims actually cached
    int is_onnx;      // nonzero: ONNX-style gather (see ref_gather_fp32)
} gather_param_t;
/* Gather along `axis` for FP32 tensors, viewing the input as
 * [outer_size, axis_size, inner_size]:
 *   output[outer, i, inner] = input[outer, indices[i], inner]. */
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
    float* out_ptr = output;
    float* in_ptr = input;

    int axis = param->axis;
    int outer_size = 1;
    int inner_size = 1;
    int axis_size = param->in_shape[axis];
    /* product of dims before the gather axis */
    for (int i = 0; i < axis; i++)
    {
        outer_size *= param->in_shape[i];
    }
    /* product of dims after the gather axis */
    for (int i = axis + 1; i < param->dim_size; i++)
    {
        inner_size *= param->in_shape[i];
        // TLOG_ERR("inner_size size: %d %d \n", inner_size, param->in_shape[i]);
    }

    // #pragma omp parallel for num_threads(num_thread)
    if (param->is_onnx)
    {
        /* NOTE(review): this branch uses param->indices_num itself as the
         * element index and copies one inner slice per outer block,
         * ignoring input_indices entirely -- it appears to support only a
         * scalar index stored in indices_num.  Confirm against the ONNX
         * Gather importer before changing. */
        for (int outer = 0; outer < outer_size; ++outer)
        {
            memcpy(out_ptr + (outer * param->indices_num) * inner_size,
                   in_ptr + (outer * axis_size + param->indices_num) * inner_size, inner_size * sizeof(float));
        }
    }
    else
    {
        for (int outer = 0; outer < outer_size; ++outer)
        {
            for (int i = 0; i < param->indices_num; i++)
            {
                memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
                       in_ptr + (outer * axis_size + (int)input_indices[i]) * inner_size, inner_size * sizeof(float));
            }
        }
    }

    return 0;
}
/* Gather along `axis` for quantized (uint8) tensors, viewing the input
 * as [outer_size, axis_size, inner_size]:
 *   output[outer, i, inner] = input[outer, indices[i], inner]. */
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
    int axis = param->axis;
    int axis_size = param->in_shape[axis];

    /* collapse dims before/after the gather axis */
    int outer_size = 1;
    for (int i = 0; i < axis; i++)
        outer_size *= param->in_shape[i];
    int inner_size = 1;
    for (int i = axis + 1; i < param->dim_size; i++)
        inner_size *= param->in_shape[i];

    // #pragma omp parallel for num_threads(num_thread)
    for (int outer = 0; outer < outer_size; ++outer)
    {
        uint8_t* dst_row = output + outer * param->indices_num * inner_size;
        const uint8_t* src_block = input + outer * axis_size * inner_size;
        for (int i = 0; i < param->indices_num; i++)
        {
            memcpy(dst_row + i * inner_size,
                   src_block + (int)input_indices[i] * inner_size, inner_size);
        }
    }

    return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct gather_param* gather_param = (struct gather_param*)ir_node->op.param_mem;
gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
op_priv_info->axis = gather_param->axis;
op_priv_info->indices_num = gather_param->indices_num;
op_priv_info->is_onnx = gather_param->is_onnx;
op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num * sizeof(int));
/* prerun now */
return 0;
}
/* Refresh the cached input shape, then dispatch to the FP32 or UINT8
 * kernel.  Returns the kernel's result, or -1 for unsupported dtypes. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;

    void* input = input_tensor->data;
    void* indices_data = indices_tensor->data;
    void* output = output_tensor->data;

    /* dims may change between runs (reshape), so re-cache every time */
    op_priv_info->dim_size = input_tensor->dim_num;
    for (int i = 0; i < op_priv_info->dim_size; i++)
    {
        op_priv_info->in_shape[i] = input_tensor->dims[i];
    }

    int ret = -1; /* stays -1 for unsupported data types */
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_gather_fp32((float*)input, (int*)indices_data, (float*)output, op_priv_info, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_gather_uint8((uint8_t*)input, (int*)indices_data, (uint8_t*)output, op_priv_info, exec_graph->num_thread);

    return ret;
}
/* Allocate zeroed private state for one Gather node.  Returns -1 on
 * allocation failure.  (Removed the unused ir_node/ir_graph locals.) */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = (gather_param_t*)sys_malloc(sizeof(gather_param_t));
    if (op_priv_info == NULL)
    {
        return -1;
    }
    memset(op_priv_info, 0, sizeof(gather_param_t));
    exec_node->ops_priv = op_priv_info;
    return 0;
}
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
gather_param_t* op_param = (gather_param_t*)exec_node->ops_priv;
sys_free(op_param->in_shape);
return 0;
}
/* Free the private state allocated in init_node and clear the pointer. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;
    sys_free(op_priv_info);
    exec_node->ops_priv = NULL;
    return 0;
}
/* Scheduling priority of this reference implementation for OP_GATHER. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference Gather kernel with the CPU device. */
int register_gather_ref_op()
{
    return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
/* Remove the reference Gather kernel from the CPU device registry. */
int unregister_gather_ref_op()
{
    return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
|
map.h | #pragma once
#include <stk/image/volume.h>
#include <functional>
namespace stk {
/*!
 * Apply `op` to every voxel of `image`, producing a volume of the output
 * voxel type.  If `out` is null a new volume is allocated; otherwise it
 * must already match the image size.  Metadata is copied from the input.
 * The loop over z is OpenMP-parallel, so `op` must be safe to invoke
 * concurrently.
 * NOTE(review): the result is returned by value (*out); whether that is
 * a deep or shallow copy depends on VolumeHelper's copy semantics --
 * confirm intended.
 */
template<typename TVoxelInputType, typename TVoxelOutputType>
VolumeHelper<TVoxelOutputType> map(
        const stk::VolumeHelper<TVoxelInputType>& image,
        const std::function<TVoxelOutputType(TVoxelInputType)> op,
        VolumeHelper<TVoxelOutputType> *out = nullptr
        )
{
    // `dest` keeps a locally-allocated output alive for the whole call
    VolumeHelper<TVoxelOutputType> dest;
    if (!out) {
        dest.allocate(image.size());
        out = &dest;
    }
    ASSERT(out->size() == image.size());
    out->copy_meta_from(image);

    #pragma omp parallel for
    for (int z = 0; z < (int) image.size().z; ++z) {
        for (int y = 0; y < (int) image.size().y; ++y) {
            for (int x = 0; x < (int) image.size().x; ++x) {
                (*out)(x, y, z) = op(image(x, y, z));
            }
        }
    }
    return *out;
}
} // namespace stk
|
solver_dcmt_omp.c | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Copyright 2021, Liheng Zheng
This file is part of UBER.
UBER is free software: you can redistribute it and/or modify it under the
terms of the MIT License as published by Massachusetts Institute of
Technology. UBER is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details.
You should have received a copy of the MIT License along with UBER. If not,
see <https://opensource.org/licenses/MIT>.
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
Gaussian pseudo-random number generator from Dynamic Creator of Mersenne
Twisters (DCMT) with OpenMP parallelization
Generating multiple independent streams of double precision pseudo-random
numbers in Gaussian distribution. This source file also serves as a wrapper
of the C library DCMT for Fortran applications.
Author: Liheng Zheng
Department of Physics and Astronomy
Rice University
2014
Exponential random number generator added, to be used in implementing Gobet
[2001] half-space reflection algorithm.
Liheng Zheng, 2016
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <inttypes.h>
#include <omp.h>
#include "dc.h"
/*
Exponent of the period is a Mersenne prime, it could be 521, 607, 1279, 2203,
2281,... The period of the PRN's is then 2^p - 1. For large p, the time needed to
find an mt_struct increases as O(p^3) (See README in the DCMT library).
*/
/* Mersenne Twister word size and period exponent (period = 2^p - 1). */
static int w = 32;
static int p = 2203;
/* Seed for the *search* of independent MT parameter sets (not the
   stream seed, which is set per thread via sgenrand_mt_f_). */
static uint32_t mt_seed = 863;
/* Per-thread MT state: each OpenMP thread owns an independent stream. */
static mt_struct *mts;
#pragma omp threadprivate(mts)

/* Fortran-callable wrappers (trailing-underscore naming convention). */
void get_mt_parameter_id_st_f_(int *id);
void sgenrand_mt_f_(int *seed);
void free_mt_struct_f_(void);
/* Deviate generators operating on an explicit MT state. */
double unidev(mt_struct *mts);
double gasdev(mt_struct *mts);
double expdev(mt_struct *mts);
/* Convenience wrappers drawing from the thread-private state `mts`. */
double unirand_(void);
double gausrand_(void);
double exprand_(void);
/*
The functions whose names are suffixed by underscores will be accessed by Fortran.
*/
void get_mt_parameter_id_st_f_(int *id)
{ /* Find the mt_struct parameters for thread *id and store them in the
     thread-private `mts`.  Exits on failure. */
   mts = get_mt_parameter_id_st(w, p, *id, mt_seed);
   if( mts==NULL ){
      printf(" Failed to get mt_struct parameter for id = %d\n",*id);
      /* mt_seed is uint32_t: %d was undefined behavior for values above
         INT_MAX; PRIu32 (from <inttypes.h>, already included) is correct */
      printf(" w = %d, p = %d, mt_seed = %" PRIu32 "\n", w, p, mt_seed);
      exit(1);
   }
}
void sgenrand_mt_f_(int *seed)
{ /* Initialize the thread-private pseudo-random number generator.
     A negative seed is accepted but converted modulo 2^32 to uint32_t,
     hence the warning. */
   uint32_t useed;
   if( *seed<0 ){
      printf(" Warning: seed = %d < 0 passed to sgenrand_mt_f.\n",*seed);
   }
   useed = *seed;
   sgenrand_mt(useed, mts);
}
void free_mt_struct_f_(void)
{ /* Release the memory claimed by the thread-private mts; each thread
     must call this before exiting its parallel region. */
   free_mt_struct(mts);
}
double unidev(mt_struct *mt)
{ /*
     Double precision (53-bit resolution) uniform deviate in [0,1] built
     from two 32-bit unsigned random integers.  Adapted from FORTRAN
     function mt_genrand_double1 in Ken-Ichi Ishikawa's Multiple Stream
     Mersenne Twister PRNG:
     <http://theo.phys.sci.hiroshima-u.ac.jp/~ishikawa/PRNG/mt_stream_en.html>
     (Parameter renamed from `mts` so it no longer shadows the
     thread-private global of the same name.)
  */
   uint32_t hi = genrand_mt(mt) >> 5;   /* top 27 random bits */
   uint32_t lo = genrand_mt(mt) >> 6;   /* top 26 random bits */
   /* (hi*2^26 + lo) is uniform on [0, 2^53-1]; divide by 2^53-1 */
   return ((double) hi * 67108864.0 + (double) lo)
          * (1.0 / 9007199254740991.0);
}
double gasdev(mt_struct *mts)
{ /*
     Normal distribution random number generator modified from the one on p.280 in
     "Numerical Recipes in FORTRAN", second edition, by W. H. Press et al., Cambridge
     University Press, 1992.

     NOTE(review): iset/gset are plain function statics and are NOT
     covered by the `#pragma omp threadprivate(mts)` above -- concurrent
     calls from multiple OpenMP threads would race on them.  Confirm that
     each thread's calls are serialized, or make these threadprivate too.
  */
   static int iset = 0;      /* 1 when a spare deviate is cached in gset */
   static double gset;
   double fac, rsq, v1, v2;

   if (iset == 0){
      /* we don't have an extra deviate handy */
      do{
         /* so pick two uniform numbers in the square extending from -1 to 1 in
            each direction */
         v1 = 2.0*unidev(mts) - 1.0;
         v2 = 2.0*unidev(mts) - 1.0;
         rsq = v1*v1 + v2*v2;
      /* see if they are in the unit circle, and if not, try again */
      } while (rsq>=1.0 || rsq==0.0);
      /* now make the Box-Muller transformation to get two normal deviates. return
         one and save the other for the next time. */
      fac = sqrt( -2.0*log(rsq)/rsq );
      gset = v1*fac;
      /* set flag */
      iset = 1;
      return v2*fac;
   }
   else{
      /* we have an extra deviate handy, so return it, and unset the flag */
      iset = 0;
      return gset;
   }
}
double expdev(mt_struct *mt)
{ /* Exponential deviate with density p(x) = exp(-x), via inversion. */
   double u;
   /* reject u == 0 so that log() stays finite */
   do{
      u = unidev(mt);
   } while (u == 0.0);
   return -log(u);
}
double unirand_(void)
{ /* Returns a double precision uniform deviate in [0,1] upon every call,
     drawn from this thread's private generator state. */
   double unirand = unidev(mts);
   return unirand;
}
double gausrand_(void)
{ /* Returns a double precision normal deviate upon every call,
     drawn from this thread's private generator state. */
   double gausrand = gasdev(mts);
   return gausrand;
}
double exprand_(void)
{ /* Returns a double precision exponential deviate upon every call,
     drawn from this thread's private generator state. */
   double exprand = expdev(mts);
   return exprand;
}
#ifdef TEST
/* NOTE(review): `count` and `mtss` are not declared anywhere in this
   file; building with -DTEST will fail unless the test harness supplies
   them (e.g. an array of per-stream states and its length) -- confirm
   where they are expected to come from. */
void dcuint32_(uint32_t intarr[])
{ /* Test function to return count number of unsigned 32-bit random integers. */
   int i;
   for( i=0; i<count; i++ )
      intarr[i] = genrand_mt(mtss[i]);
}
void dcunidev_(double uniarr[])
{ /* Test function to return count number of double precision uniform deviates. */
   int i;
   for( i=0; i<count; i++ )
      uniarr[i] = unidev(mtss[i]);
}
#endif
|
DRB066-pointernoaliasing-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Freshly allocated pointers do not alias to each other.
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
/* Fill two freshly allocated arrays in a parallel loop.  The pointers
 * cannot alias (fresh malloc results), so the loop is race-free -- the
 * point of this DataRaceBench kernel.  Allocation failures, previously
 * ignored, now bail out cleanly. */
void setup(int N)
{
  double * m_pdv_sum = (double* ) malloc (sizeof (double) * N );
  double * m_nvol    = (double* ) malloc (sizeof (double) * N );
  if (m_pdv_sum == NULL || m_nvol == NULL) {
    /* release whichever allocation succeeded (free(NULL) is a no-op) */
    free(m_pdv_sum);
    free(m_nvol);
    return;
  }

  #pragma omp parallel for schedule(static)
  for (int i=0; i < N; ++i )
  {
    m_pdv_sum[ i ] = 0.0;
    m_nvol[ i ] = i*2.5;
  }

  free(m_pdv_sum);
  free(m_nvol);
}
int main()
{
  /* omprace_init/omprace_fini bracket the region monitored by the
     race-detection harness (declared in omprace.h) */
  omprace_init();
  int N =1000;
  setup(N);
  omprace_fini();
  return 0;
}
|
lsearch.h | #pragma once
#ifndef FGC_LOCAL_SEARCH_H__
#define FGC_LOCAL_SEARCH_H__
#include "diskmat/diskmat.h"
#include "minicore/util/oracle.h"
#include "minicore/optim/kcenter.h"
#include "discreture/include/discreture.hpp"
#include "libsimdsampling/argminmax.h"
#include <atomic>
/*
* In this file, we use the local search heuristic for k-median.
* Originally described in "Local Search Heuristics for k-median and Facility Location Problems",
* Vijay Arya, Naveen Garg, Rohit Khandekar, Adam Meyerson, Kamesh Munagala, Vinayaka Pandit
* (http://theory.stanford.edu/~kamesh/lsearch.pdf)
*/
namespace minicore {
namespace graph {
/* Brute-force k-median: evaluates every k-subset of rows of a distance
 * matrix and keeps the cheapest as the center set. */
template<typename MatType, typename IType=std::uint32_t, size_t N=16>
struct ExhaustiveSearcher {
    using value_type = typename MatType::ElementType;
    const MatType &mat_;
    blaze::SmallArray<IType, N> bestsol_;
    double current_cost_;
    const unsigned k_;
    // Members are initialized in declaration order, so bestsol_ must be
    // sized from the constructor argument `k`: the previous code used k_,
    // which is declared after bestsol_ and was therefore read while still
    // uninitialized (undefined behavior).
    ExhaustiveSearcher(const MatType &mat, unsigned k): mat_(mat), bestsol_(k), current_cost_(std::numeric_limits<double>::max()), k_(k) {}
    // Enumerate all C(nr, k_) center sets; track the best in
    // bestsol_/current_cost_.  (Removed the unused csol array.)
    void run() {
        const size_t nr = mat_.rows();
        size_t nchecked = 0;
        for(auto &&comb: discreture::combinations(nr, k_)) {
            // Cost: each point pays its distance to the nearest chosen center
            const double cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, comb.data(), comb.size())));
            ++nchecked;
            if((nchecked & (nchecked - 1)) == 0)   // progress at powers of two
                std::fprintf(stderr, "iteration %zu completed\n", nchecked);
            if(cost < current_cost_) {
                std::fprintf(stderr, "Swapping to new center set with new cost = %g on iteration %zu\n", cost, nchecked);
                current_cost_ = cost;
                std::copy(comb.data(), comb.data() + comb.size(), bestsol_.data());
            }
        }
        std::fprintf(stderr, "Best result: %g. Total number of combinations checked: %zu\n", current_cost_, nchecked);
    }
};
// Convenience factory: deduces MatType so callers need not spell out the
// full ExhaustiveSearcher template arguments.
template<typename MatType, typename IType=std::uint32_t>
auto make_kmed_esearcher(const MatType &mat, unsigned k) {
    return ExhaustiveSearcher<MatType, IType>(mat, k);
}
// Local-search (swap-based) k-medoids solver over a precomputed distance
// matrix: rows are candidate centers, columns are the points to cover.
template<typename MatType, typename IType>
struct LocalKMedSearcher {
    using value_type = typename MatType::ElementType;
    static_assert(std::is_integral_v<IType>, "IType must be integral");
    const MatType &mat_;                // distance matrix (not owned; must outlive the searcher)
    shared::flat_hash_set<IType> sol_;  // current set of selected center (row) indices
    blaze::DynamicVector<IType> assignments_;  // per-column index of its assigned center
    blaze::DynamicVector<typename MatType::ElementType, blaze::rowVector> current_costs_;  // per-column distance to its assigned center
    double current_cost_;               // total cost of sol_ (sum of per-column minima)
    double eps_, initial_cost_, init_cost_div_;  // swap tolerance, scaled initial cost, normalizer
    IType k_;                           // number of centers requested
    const size_t nr_, nc_;              // cached row/column counts of mat_
    double diffthresh_;                 // minimum improvement for a swap to be accepted
    blaze::DynamicVector<IType> ordering_;  // candidate visitation order (shuffled when shuffle_ set)
    uint32_t shuffle_:1;                // randomize candidate order on each restart
    // Set to 0 to avoid lazy search, 1 to only do local search, and 2 to do lazy search and then use exhaustive
    uint32_t lazy_eval_:15;
    uint32_t max_swap_n_:16;
    // if(max_swap_n_ > 1), after exhaustive single-swap optimization, enables multiswap search.
    // TODO: enable searches for multiswaps.
    // Constructors
LocalKMedSearcher(const LocalKMedSearcher &o) = default;
// BUGFIX: the previous hand-rolled move constructor memset() *this and then
// byte-swapped the two objects. That is undefined behavior for a type with a
// reference member and non-trivially-copyable members (flat_hash_set,
// blaze::DynamicVector), and it also passed const-qualified pointers to
// std::memset/std::swap_ranges (ill-formed). Member-wise move is correct and
// the reference/const members are simply copied.
LocalKMedSearcher(LocalKMedSearcher &&o) = default;
// Construct a searcher over `mat`, choosing `k` centers.
//   eps:     relative improvement tolerance for accepting a swap.
//   seed:    RNG seed forwarded to reseed().
//   wc:      optional column-index subset for asymmetric kcenter seeding.
//   initdiv: divisor for the initial-cost normalizer (0 -> #columns).
// BUGFIX: initial_cost_ and diffthresh_ were previously left indeterminate
// until assign()/run() set them; run_lazy() reads diffthresh_, so zero-init
// both here.
template<typename IndexContainer=std::vector<uint32_t>>
LocalKMedSearcher(const MatType &mat, unsigned k, double eps=1e-8, uint64_t seed=0,
                  const IndexContainer *wc=nullptr, double initdiv=0.):
    mat_(mat), assignments_(mat.columns(), 0),
    current_cost_(std::numeric_limits<value_type>::max()),
    eps_(eps),
    initial_cost_(0.),
    k_(k), nr_(mat.rows()), nc_(mat.columns()),
    diffthresh_(0.),
    ordering_(mat.rows()), shuffle_(true), lazy_eval_(2), max_swap_n_(1)
{
    std::iota(ordering_.begin(), ordering_.end(), 0);
    static_assert(std::is_integral_v<std::decay_t<decltype(wc->operator[](0))>>, "index container must contain integral values");
    sol_.reserve(k);
    init_cost_div_ = initdiv ? initdiv: double(mat.columns());
    reseed(seed, true, wc);
}
// Replace the current solution with the centers in [start, end) and reset
// all column assignments to 0 (they are recomputed by assign()).
template<typename It>
void assign_centers(It start, It end) {
    sol_.clear();
    for(; start != end; ++start)
        sol_.insert(*start);
    assignments_ = 0;
}
template<typename RNG>
std::pair<shared::flat_hash_set<IType>, value_type> my_kcenter(RNG &rng, int extratries=2) {
shared::flat_hash_set<IType> ret;
ret.clear();
ret.insert(rng() % mat_.rows());
auto cid = *ret.begin();
constexpr bool rowitude = blz::IsRowMajorMatrix_v<MatType> ? blz::rowVector: blz::columnVector;
blz::DV<blz::ElementType_t<MatType>, rowitude> costs = row(mat_, cid);
using FT = value_type;
using IT = IType;
using PT = std::pair<FT, IT>;
auto fill_set = [&](auto &set) -> value_type {
while(set.size() < std::min(size_t(k_), mat_.rows())) {
PT argmaxcost;
argmaxcost.second = reservoir_simd::argmax(costs);
argmaxcost.first = costs[argmaxcost.second];
assert(set.find(argmaxcost.second) == set.end());
set.insert(argmaxcost.second);
costs = blz::min(row(mat_, argmaxcost.second), costs);
}
return blz::sum(costs);
};
value_type retcost = fill_set(ret), nextcost;
while(extratries > 0) {
IType cid = rng() % mat_.rows();
shared::flat_hash_set<IType> tmpset{cid};
costs = row(mat_, cid);
if((nextcost = fill_set(tmpset)) < retcost)
std::tie(nextcost, tmpset) = std::move(std::tie(retcost, ret));
}
return std::make_pair(ret, retcost);
}
// Re-randomize the solution set. Strategy:
//   - if the matrix has <= k_ rows, every row becomes a center;
//   - if do_kcenter and the matrix is square, seed via my_kcenter();
//   - if `wc` names a column subset matching the column count, run a
//     farthest-point heuristic on that submatrix (asymmetric case);
//   - otherwise pick k_ distinct rows uniformly at random.
template<typename IndexContainer=std::vector<uint32_t>>
void reseed(uint64_t seed, bool do_kcenter=false, const IndexContainer *wc=nullptr, unsigned extra_kc=0) {
    assignments_ = 0;
    current_cost_ = std::numeric_limits<value_type>::max();
    wy::WyRand<IType, 2> rng(seed);
    sol_.clear();
    if(mat_.rows() <= k_) {
        // Degenerate case: every candidate is a center.
        for(unsigned i = 0; i < mat_.rows(); ++i)
            sol_.insert(i);
    } else if(do_kcenter && mat_.rows() == mat_.columns()) {
        std::fprintf(stderr, "Using kcenter\n");
        std::tie(sol_, current_cost_) = my_kcenter(rng, extra_kc);
#ifndef NDEBUG
        std::fprintf(stderr, "k_: %u. sol size: %zu. rows: %zu. columns: %zu. %d kcenter tries.\n", k_, sol_.size(),
                     mat_.rows(), mat_.columns(), extra_kc);
#endif
        assert(sol_.size() == k_ || sol_.size() == mat_.rows());
    } else {
        if(!do_kcenter || wc == nullptr || wc->size() != mat_.columns()) {
            // Plain random seeding; the set de-duplicates collisions.
            while(sol_.size() < k_)
                sol_.insert(rng() % mat_.rows());
        } else {
            //std::fprintf(stderr, "Using submatrix to perform kcenter approximation on an asymmetric matrix. rows/cols before: %zu, %zu\n", mat_.rows(), mat_.columns());
            blaze::DynamicMatrix<value_type> subm = blaze::rows(mat_, wc->data(), wc->size());
            //std::cerr << subm << '\n';
            //std::fprintf(stderr, "subm rows: %zu\n", subm.rows());
            uint32_t first = rng() % subm.rows();
            std::vector<uint32_t> approx{first};
            blaze::DynamicVector<value_type, blaze::rowVector> mincosts = row(subm, first);
            std::vector<uint32_t> remaining(subm.rows());
            std::iota(remaining.begin(), remaining.end(), 0u);
            while(approx.size() < std::min(subm.rows(), size_t(k_))) {
                //std::fputc('\n', stderr);
                double maxcost = -1.;
                unsigned maxind = -1;
                for(unsigned i = 0; i < remaining.size(); ++i) {
                    auto ri = remaining[i];
                    if(std::find(approx.begin(), approx.end(), ri) != approx.end()) continue;
                    auto r = row(subm, ri);
                    // NOTE(review): candidates are scored by the max of their
                    // entire row, and `mincosts` is maintained below but never
                    // consulted during selection -- looks like an unfinished
                    // farthest-point heuristic; confirm intent.
                    auto cost = blaze::max(r);
                    if(cost > maxcost) maxcost = cost, maxind = i;
                }
                auto nextind = remaining[maxind];
                approx.push_back(nextind);
                // Swap-and-pop removal of the chosen candidate.
                std::swap(remaining[maxind], remaining.back());
                remaining.pop_back();
                mincosts = blaze::min(mincosts, row(subm, nextind));
            }
            // Map submatrix positions back to original row indices.
            for(auto i: approx)
                sol_.insert(wc->at(i));
            while(sol_.size() < k_) {
                // Add random entries until the desired size is reached.
                sol_.insert(rng() % mat_.rows());
            }
            //std::fprintf(stderr, "used submatrix. sol size: %zu\n", sol_.size());
        }
    }
}
// Compute the k-medoids objective for an arbitrary center container `c`:
// the sum, over all columns, of the distance to the nearest center in c.
template<typename Container>
double cost_for_sol(const Container &c) const {
    double ret = 0.;
    OMP_PRAGMA("omp parallel for reduction(+:ret)")
    for(size_t i = 0; i < mat_.columns(); ++i) {
        auto col = column(mat_, i);
        auto cit = c.begin();
        value_type best = col[*cit];
        while(++cit != c.end()) {
            const value_type cand = col[*cit];
            if(cand < best)
                best = cand;
        }
        ret += best;
    }
    return ret;
}
// Setup/Utilities
// Rebuild assignments_, current_costs_, and current_cost_ from sol_, and
// derive initial_cost_ (used later to scale the swap-acceptance threshold).
void assign() {
    assert(assignments_.size() == nc_);
    std::fprintf(stderr, "rows: %zu. cols: %zu. sol size: %zu. k: %u\n",
                 mat_.rows(), mat_.columns(), sol_.size(), k_);
    assert(sol_.size() == k_ || sol_.size() == mat_.rows());
    // Seed every column with the first center, then sweep the rest,
    // keeping the cheaper assignment per column.
    auto it = sol_.begin();
    const auto eit = sol_.end();
    assignments_ = *it;
    current_costs_ = row(mat_, *it);
    while(++it != eit) {
        auto center = *it;
        auto r = row(mat_, center);
        OMP_PFOR
        for(size_t ci = 0; ci < nc_; ++ci) {
            auto asn = assignments_[ci];
            // mat_(asn, ci) is the column's current assignment cost.
            if(const auto newcost = r[ci];
               newcost < mat_(asn, ci))
            {
                current_costs_[ci] = newcost;
                assignments_[ci] = center;
            }
        }
    }
    DBG_ONLY(std::fprintf(stderr, "Set assignments for size %zu\n", assignments_.size());)
    current_cost_ = cost_for_sol(sol_);
    DBG_ONLY(std::fprintf(stderr, "Got costs for size %zu with centers size = %zu\n", assignments_.size(), sol_.size());)
    // Normalizer for diffthresh_ (see run()/run_multi()).
    initial_cost_ = current_cost_ / 2 / init_cost_div_;
}
// Improvement (old cost - new cost; positive means better) obtained by
// replacing `oldcenter` with `newcenter` in the current solution. Does not
// modify state. single_threaded forces a serial blaze evaluation.
double evaluate_swap(IType newcenter, IType oldcenter, bool single_threaded=false) const {
    blaze::SmallArray<IType, 16> as(sol_.begin(), sol_.end());
    *std::find(as.begin(), as.end(), oldcenter) = newcenter;
    double cost;
    if(single_threaded) {
        cost = blaze::serial(blaze::sum(blaze::serial(blaze::min<blaze::columnwise>(rows(mat_, as)))));
    } else cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, as)));
    return current_cost_ - cost;
}
// Improvement from swapping N old centers for N new ones (N known at
// compile time). Positive return means the swap lowers the cost.
template<size_t N, typename IndexType>
double evaluate_multiswap(const IndexType *newcenter, const IndexType *oldcenter, bool single_threaded=false) const {
    blaze::SmallArray<IType, 16> as(sol_.begin(), sol_.end());
    shared::sort(as.begin(), as.end());
    for(size_t i = 0; i < N; ++i) {
        *std::find(as.begin(), as.end(), oldcenter[i]) = newcenter[i];
    }
    double cost;
    if(single_threaded) {
        cost = blaze::serial(blaze::sum(blaze::serial(blaze::min<blaze::columnwise>(rows(mat_, as)))));
    } else
        cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, as)));
    return current_cost_ - cost;
}
// Runtime-N front end: dispatches to the compile-time version for small N,
// otherwise falls back to a generic loop.
template<typename IndexType>
double evaluate_multiswap_rt(const IndexType *newcenter, const IndexType *oldcenter, size_t N, bool single_threaded=false) const {
    switch(N) {
        case 2: return evaluate_multiswap<2>(newcenter, oldcenter, single_threaded);
        case 3: return evaluate_multiswap<3>(newcenter, oldcenter, single_threaded);
    }
    blaze::SmallArray<IType, 16> as(sol_.begin(), sol_.end());
    for(size_t i = 0; i < N; ++i) {
        *std::find(as.begin(), as.end(), oldcenter[i]) = newcenter[i];
    }
    // NOTE(review): here the sort happens *after* the replacements, while
    // evaluate_multiswap sorts before -- both produce the same row set, but
    // the inconsistency is worth confirming as intentional.
    shared::sort(as.begin(), as.end());
    double cost;
    if(single_threaded) {
        cost = blaze::serial(blaze::sum(blaze::serial(blaze::min<blaze::columnwise>(rows(mat_, as)))));
    } else
        cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, as)));
    return current_cost_ - cost;
}
// Estimate the improvement of an N-way swap without recomputing the full
// objective: columns that get cheaper under the new centers contribute
// directly; columns whose current cost came from a removed center are
// re-priced against the remaining + new centers.
template<size_t N>
double lazy_evaluate_multiswap(const IType *newcenters, const IType *oldcenters) const {
    // Instead of performing the full recalculation, do lazy calculation.
    //
    // tmp = current solution minus the centers being swapped out.
    std::vector<IType> tmp(sol_.begin(), sol_.end());
    for(unsigned i = 0; i < N; ++i)
        tmp.erase(std::find(tmp.begin(), tmp.end(), oldcenters[i]));
    std::sort(tmp.begin(), tmp.end());
    // Instead of performing the full recalculation, do lazy calculation.
    if(current_costs_.size() != nc_) { // If not calculated, calculate
        auto it = sol_.begin();
        OMP_CRITICAL
        {
            current_costs_ = row(mat_, *it BLAZE_CHECK_DEBUG);
        }
        while(++it != sol_.end()) {
            current_costs_ = blaze::min(current_costs_, row(mat_, *it BLAZE_CHECK_DEBUG));
        }
    }
    // Per-column minima over the incoming and outgoing center rows.
    blaze::DynamicVector<typename MatType::ElementType, blaze::rowVector> newptr = blaze::min<blaze::rowwise>(rows(mat_, newcenters, N));
    blaze::DynamicVector<typename MatType::ElementType, blaze::rowVector> oldptr = blaze::min<blaze::rowwise>(rows(mat_, oldcenters, N));
    double diff = 0.;
#ifdef _OPENMP
    _Pragma("omp parallel for reduction(+:diff)")
#endif
    for(size_t i = 0; i < nc_; ++i) {
        auto ccost = current_costs_[i];
        if(newptr[i] < ccost) {
            // A new center serves this column more cheaply.
            auto sub = ccost - newptr[i];
            diff += sub;
        } else if(ccost == oldptr[i]) {
            // Column was served by a removed center: re-price it against
            // the surviving centers and the new ones.
            auto oldbest = blaze::min(blaze::elements(blaze::column(mat_, i), tmp.data(), tmp.size()));
            auto sub = ccost - std::min(oldbest, newptr[i]);
            diff += sub;
        }
    }
    return diff;
}
// Getters
// Number of centers requested.
auto k() const {
    return k_;
}
// Local search using the lazy swap estimate: each candidate swap is first
// scored cheaply; only promising ones are confirmed with evaluate_swap().
// Any accepted swap restarts the scan (goto next) until no swap clears
// diffthresh_. Assumes diffthresh_ has been set (see run()).
void run_lazy() {
#if 0
    shared::flat_hash_map<IType, std::vector<IType>> current_assignments;
    for(size_t i = 0; i < assignments_.size(); ++i) {
        current_assignments[assignments_[i]].push_back(i);
    }
#endif
    size_t total = 0;
    std::vector<IType> newindices(sol_.begin(), sol_.end());
    next:
    for(const auto oldcenter: sol_) {
        // newindices = solution with oldcenter moved to the back so it can
        // be overwritten by each candidate replacement.
        newindices.assign(sol_.begin(), sol_.end());
        std::swap(*std::find(newindices.begin(), newindices.end(), oldcenter), newindices.back());
        if(shuffle_) {
            wy::WyRand<uint64_t, 2> rng(total);
            std::shuffle(ordering_.begin(), ordering_.end(), rng);
        }
        // Make a vector with the original solution, but replace the old value with the new value
        for(size_t pi = 0; pi < nr_; ++pi) {
            auto potential_index = ordering_[pi];
            if(sol_.find(potential_index) != sol_.end() || potential_index == oldcenter) continue;
            newindices.back() = potential_index;
            assert(std::find(newindices.begin(), newindices.end(), oldcenter) == newindices.end());
            double val = 0.;
            auto newptr = row(mat_, potential_index);
#ifdef _OPENMP
#pragma omp parallel for reduction(+:val)
#endif
            for(size_t i = 0; i < nc_; ++i) {
                auto oldcost = current_costs_[i];
                if(newptr[i] < oldcost) {
                    // Candidate center serves this column more cheaply.
                    auto diff = oldcost - newptr[i];
                    val += diff;
                } else if(assignments_[i] == oldcenter) {
                    // Column loses its center: re-price against the rest.
                    auto mincost = blaze::min(blaze::elements(blaze::column(mat_, i), newindices.data(), newindices.size()));
                    auto diff = oldcost - mincost;
                    val += diff;
                }
            }
            assert(sol_.size() == k_);
            // Only calculate exhaustively if the lazy form returns yes.
            if(val > diffthresh_ && (val = evaluate_swap(potential_index, oldcenter)) > diffthresh_) {
                assert(sol_.size() == k_);
                sol_.erase(oldcenter);
                sol_.insert(potential_index);
                assert(sol_.size() == k_);
                assign();
                //current_cost_ = blaze::sum(current_costs_);
                ++total;
                std::fprintf(stderr, "Swap number %zu updated with delta %.12g to new cost with cost %0.12g\n", total, val, current_cost_);
                goto next;
            }
        }
    }
    std::fprintf(stderr, "Finished in %zu swaps by exhausting all potential improvements. Final cost: %f\n",
                 total, current_cost_);
}
// Multi-swap local search: try exchanging `nswap` centers at a time.
// NOTE(review): this routine appears unfinished (matching the TODO on the
// member declarations): `swap_in` is sized but never populated, and the
// combination tuples passed to evaluate_multiswap_rt are *positions* into
// csol/swap_in rather than actual row indices -- verify before relying on it.
void run_multi(unsigned nswap=1) {
    if(mat_.rows() <= k_) return;   // nothing to improve
    if(nswap == 1) {
        run();                      // single-swap path
        return;
    }
    if(nswap >= k_) throw std::runtime_error("nswap >= k_");
    assign();
    // Threshold an nswap-exchange must beat to be accepted.
    const double diffthresh = initial_cost_ / k_ * eps_;
    diffthresh_ = diffthresh;
    next:
    {
        blaze::DynamicVector<IType> csol(sol_.size());
        std::copy(sol_.begin(), sol_.end(), csol.data());
        blaze::DynamicVector<IType> swap_in(nc_ - sol_.size());
        blaze::DynamicVector<IType> inargs(nswap), outargs(nswap);
        for(auto &&swap_out_comb: discreture::combinations(csol.size(), nswap)) {
            for(auto &&swap_in_comb: discreture::combinations(swap_in.size(), nswap)) {
                auto v = evaluate_multiswap_rt(swap_in_comb.data(), swap_out_comb.data(), nswap);
                if(v >= diffthresh_) {
                    // Accept: apply the exchange and restart the scan.
                    for(auto v: swap_out_comb) sol_.erase(v);
                    sol_.insert(swap_in_comb.begin(), swap_in_comb.end());
                    current_cost_ -= v;
                    goto next;
                }
            }
        }
    }
}
// Main driver: initialize assignments, then perform swap-based local search
// until no single swap improves the cost by more than diffthresh_.
// Honors lazy_eval_ (0 = exhaustive only, 1 = lazy then exhaustive,
// 2 = lazy only) and max_swap_n_ (multiswap follow-up).
void run() {
    assign();
    const double diffthresh = initial_cost_ / k_ * eps_;
    diffthresh_ = diffthresh;
    if(mat_.rows() <= k_) return;   // every row is already a center
    if(lazy_eval_) {
        run_lazy();
        if(lazy_eval_ > 1)
            return;                 // lazy-only mode
    }
    //const double diffthresh = 0.;
    std::fprintf(stderr, "diffthresh: %f\n", diffthresh);
    size_t total = 0;
    next:
    for(const auto oldcenter: sol_) {
        if(shuffle_) {
            wy::WyRand<uint64_t, 2> rng(total);
            std::shuffle(ordering_.begin(), ordering_.end(), rng);
        }
        std::vector<IType> newindices(sol_.begin(), sol_.end());
        for(size_t pi = 0; pi < nr_; ++pi) {
            size_t potential_index = ordering_[pi];
            if(sol_.find(potential_index) != sol_.end()) continue;
            // Exhaustive evaluation of swapping oldcenter -> potential_index.
            if(const auto val = evaluate_swap(potential_index, oldcenter, true);
               val > diffthresh) {
#ifndef NDEBUG
                std::fprintf(stderr, "Swapping %zu for %u. Swap number %zu. Current cost: %g. Improvement: %g. Threshold: %g.\n", potential_index, oldcenter, total + 1, current_cost_, val, diffthresh);
#endif
                sol_.erase(oldcenter);
                sol_.insert(potential_index);
                ++total;
                current_cost_ -= val;
                std::fprintf(stderr, "Swap number %zu with cost %0.12g\n", total, current_cost_);
                goto next;  // restart the scan after any accepted swap
            }
        }
    }
    std::fprintf(stderr, "Finished in %zu swaps by exhausting all potential improvements. Final cost: %f\n",
                 total, current_cost_);
    if(max_swap_n_ > 1) {
        std::fprintf(stderr, "max_swap_n_ %u set. Searching multiswaps\n", max_swap_n_);
        run_multi(max_swap_n_);
    }
}
// Brute-force verification pass: for each solution slot, try every
// non-solution row and keep any strictly cheaper set, restarting whenever
// an improvement is found. Updates current_cost_ at the end.
void exhaustive_manual_check() {
    const std::vector<IType> csol(sol_.begin(), sol_.end());
    std::vector<IType> wsol = csol, fsol = csol;  // working / best-found sets
    double ccost = current_cost_;
#ifndef NDEBUG
    double ocost = current_cost_;
#endif
    size_t extra_rounds = 0;
    bool improvement_made;
    start:
    improvement_made = false;
    for(size_t si = 0; si < k_; ++si) {
        for(size_t ci = 0; ci < nr_; ++ci) {
            if(std::find(wsol.begin(), wsol.end(), ci) != wsol.end()) continue;
            wsol[si] = ci;
            const double cost = blaze::sum(blaze::min<blaze::columnwise>(rows(mat_, wsol)));
            if(cost < ccost) {
                std::fprintf(stderr, "Found a better one: %g vs %g (%g)\n", cost, ccost, ccost - cost);
                ccost = cost;
                fsol = wsol;
                // NOTE(review): `wsol = fsol;` below is a no-op (fsol was just
                // assigned from wsol), and since improvements jump straight to
                // `start`, the trailing `if(improvement_made)` check can never
                // fire with true -- both are harmless dead code.
                wsol = fsol;
                improvement_made = true;
                ++extra_rounds;
                goto start;
            }
        }
        wsol[si] = csol[si];  // restore slot before trying the next one
    }
    if(improvement_made) goto start;
    current_cost_ = ccost;
#ifndef NDEBUG
    std::fprintf(stderr, "improved cost for %zu rounds and a total improvemnet of %g\n", extra_rounds, ocost - current_cost_);
    //assert(std::abs(ocost - current_cost_) < ((initial_cost_ / k_ * eps_) + 0.1)); // 1e-5 for numeric stability issues
#endif
}
};
template<typename Mat, typename IType=std::uint32_t, typename IndexContainer=std::vector<uint32_t>>
auto make_kmed_lsearcher(const Mat &mat, unsigned k, double eps=0.01, uint64_t seed=0,
const IndexContainer *wc=nullptr, double initdiv=0.) {
return LocalKMedSearcher<Mat, IType>(mat, k, eps, seed, wc, initdiv);
}
} // graph
using graph::make_kmed_esearcher;
using graph::make_kmed_lsearcher;
using graph::LocalKMedSearcher;
using graph::ExhaustiveSearcher;
} // minicore
#endif /* FGC_LOCAL_SEARCH_H__ */
|
zboxloop.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_utilities.h"
#include "HYPRE_struct_ls.h"
#include "HYPRE_krylov.h"
#include "_hypre_struct_mv.h"
#include "_hypre_struct_mv.hpp"
/*--------------------------------------------------------------------------
* Test driver to time new boxloops and compare to the old ones
*--------------------------------------------------------------------------*/
#define DEVICE_VAR
/* Driver entry point: builds four identical data boxes, verifies the new
 * (zypre_) BoxLoop1 against the old serial loop, then times old (hypre_)
 * and new (zypre_) BoxLoop0-4 variants over `reps` repetitions. */
hypre_int
main( hypre_int argc,
      char *argv[] )
{
   HYPRE_Int arg_index;
   HYPRE_Int print_usage;
   HYPRE_Int nx, ny, nz;
   HYPRE_Int P, Q, R;
   HYPRE_Int time_index;
   HYPRE_Int num_procs, myid;
   HYPRE_Int dim;
   HYPRE_Int rep, reps, fail, sum;
   HYPRE_Int size;
   hypre_Box *x1_data_box, *x2_data_box, *x3_data_box, *x4_data_box;
   /* xi2..xi4 are presumably declared by the BoxLoop macros themselves,
    * hence the commented-out declarations -- confirm against the macro
    * definitions in _hypre_struct_mv.h. */
   //HYPRE_Int xi1, xi2, xi3, xi4;
   HYPRE_Int xi1;
   HYPRE_Real *xp1, *xp2, *xp3, *xp4;
   hypre_Index loop_size, start, unit_stride, index;
   /*-----------------------------------------------------------
    * Initialize some stuff
    *-----------------------------------------------------------*/
   /* Initialize MPI */
   hypre_MPI_Init(&argc, &argv);
   hypre_MPI_Comm_size(hypre_MPI_COMM_WORLD, &num_procs );
   hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );
   /*-----------------------------------------------------------
    * Set defaults
    *-----------------------------------------------------------*/
   dim = 3;
   nx = 10;
   ny = 10;
   nz = 10;
   P = num_procs;
   Q = 1;
   R = 1;
   /*-----------------------------------------------------------
    * Parse command line
    *-----------------------------------------------------------*/
   print_usage = 0;
   arg_index = 1;
   while (arg_index < argc)
   {
      if ( strcmp(argv[arg_index], "-n") == 0 )
      {
         arg_index++;
         nx = atoi(argv[arg_index++]);
         ny = atoi(argv[arg_index++]);
         nz = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-P") == 0 )
      {
         arg_index++;
         P = atoi(argv[arg_index++]);
         Q = atoi(argv[arg_index++]);
         R = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-d") == 0 )
      {
         arg_index++;
         dim = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-help") == 0 )
      {
         print_usage = 1;
         break;
      }
      else
      {
         /* Unrecognized arguments are skipped silently. */
         arg_index++;
      }
   }
   /*-----------------------------------------------------------
    * Print usage info
    *-----------------------------------------------------------*/
   if ( (print_usage) && (myid == 0) )
   {
      hypre_printf("\n");
      hypre_printf("Usage: %s [<options>]\n", argv[0]);
      hypre_printf("\n");
      hypre_printf("  -n <nx> <ny> <nz>   : problem size per block\n");
      hypre_printf("  -P <Px> <Py> <Pz>   : processor topology\n");
      hypre_printf("  -d <dim>            : problem dimension (2 or 3)\n");
      hypre_printf("\n");
   }
   if ( print_usage )
   {
      exit(1);
   }
   /*-----------------------------------------------------------
    * Check a few things
    *-----------------------------------------------------------*/
   if ((P*Q*R) > num_procs)
   {
      if (myid == 0)
      {
         hypre_printf("Error: PxQxR is more than the number of processors\n");
      }
      exit(1);
   }
   else if ((P*Q*R) < num_procs)
   {
      if (myid == 0)
      {
         hypre_printf("Warning: PxQxR is less than the number of processors\n");
      }
   }
   /*-----------------------------------------------------------
    * Initialize some stuff
    *-----------------------------------------------------------*/
   /* Interior starts at (1,1,1); boxes carry one ghost layer per side. */
   hypre_SetIndex3(start, 1, 1, 1);
   hypre_SetIndex3(loop_size, nx, ny, nz);
   hypre_SetIndex3(unit_stride, 1, 1, 1);
   x1_data_box = hypre_BoxCreate(dim);
   x2_data_box = hypre_BoxCreate(dim);
   x3_data_box = hypre_BoxCreate(dim);
   x4_data_box = hypre_BoxCreate(dim);
   hypre_SetIndex3(hypre_BoxIMin(x1_data_box), 0, 0, 0);
   hypre_SetIndex3(hypre_BoxIMax(x1_data_box), nx+1, ny+1, nz+1);
   hypre_CopyBox(x1_data_box, x2_data_box);
   hypre_CopyBox(x1_data_box, x3_data_box);
   hypre_CopyBox(x1_data_box, x4_data_box);
   /* Allocation covers the box including ghost layers; CTAlloc zeroes it. */
   size = (nx+2)*(ny+2)*(nz+2);
   xp1 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp2 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp3 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp4 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   /* Scale repetitions inversely with problem size for comparable runtimes. */
   reps = 1000000000/(nx*ny*nz+1000);
   /*-----------------------------------------------------------
    * Print driver parameters
    *-----------------------------------------------------------*/
   if (myid == 0)
   {
      hypre_printf("Running with these driver parameters:\n");
      hypre_printf("  (nx, ny, nz)    = (%d, %d, %d)\n", nx, ny, nz);
      hypre_printf("  (Px, Py, Pz)    = (%d, %d, %d)\n", P,  Q,  R);
      hypre_printf("  dim             = %d\n", dim);
      hypre_printf("  reps            = %d\n", reps);
   }
   /*-----------------------------------------------------------
    * Check new boxloops
    *-----------------------------------------------------------*/
   /* xp1 is already initialized to 0 */
   zypre_BoxLoop1Begin(dim, loop_size,
                       x1_data_box, start, unit_stride, xi1);
   zypre_BoxLoop1For(xi1)
   {
      xp1[xi1] ++;
   }
   zypre_BoxLoop1End(xi1);
   /* Use old boxloop to check that values are set to 1 */
   /* NOTE(review): the serial check hard-codes ndim=3 while the loop above
    * uses `dim` -- confirm this is intended when -d 2 is passed. */
   fail = 0;
   sum = 0;
   hypre_SerialBoxLoop1Begin(3, loop_size,
                             x1_data_box, start, unit_stride, xi1);
   {
      sum += xp1[xi1];
      if (xp1[xi1] != 1)
      {
         hypre_BoxLoopGetIndex(index);
         hypre_printf("*(%d,%d,%d) = %d\n",
                      index[0], index[1], index[2], (HYPRE_Int) xp1[xi1]);
         fail = 1;
      }
   }
   hypre_SerialBoxLoop1End(xi1);
   if (sum != (nx*ny*nz))
   {
      hypre_printf("*sum = %d\n", sum);
      fail = 1;
   }
   if (fail)
   {
      exit(1);
   }
   /*-----------------------------------------------------------
    * Synchronize so that timings make sense
    *-----------------------------------------------------------*/
   hypre_MPI_Barrier(hypre_MPI_COMM_WORLD);
   /*-----------------------------------------------------------
    * Time old boxloops
    *-----------------------------------------------------------*/
   /* Time BoxLoop0 */
   time_index = hypre_InitializeTiming("BoxLoop0");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      xi1 = 0;
      hypre_BoxLoop0Begin(3, loop_size);
      {
         xp1[xi1] += xp1[xi1];
         //xi1++;
      }
      hypre_BoxLoop0End();
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoop1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop1Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1);
      {
         xp1[xi1] += xp1[xi1];
      }
      hypre_BoxLoop1End(xi1);
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoop2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop2Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2];
      }
      hypre_BoxLoop2End(xi1, xi2);
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop3 */
   time_index = hypre_InitializeTiming("BoxLoop3");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop3Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3);
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
      }
      hypre_BoxLoop3End(xi1, xi2, xi3);
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop4 */
   time_index = hypre_InitializeTiming("BoxLoop4");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      hypre_BoxLoop4Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3,
                          x4_data_box, start, unit_stride, xi4);
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
      }
      hypre_BoxLoop4End(xi1, xi2, xi3, xi4);
   }
   hypre_EndTiming(time_index);
   hypre_PrintTiming("Old BoxLoop times", hypre_MPI_COMM_WORLD);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();
   /*-----------------------------------------------------------
    * Time new boxloops
    *-----------------------------------------------------------*/
   /* Time BoxLoop0 */
   time_index = hypre_InitializeTiming("BoxLoop0");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      xi1 = 0;
      zypre_BoxLoop0Begin(dim, loop_size);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) firstprivate(xi1) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop0For()
      {
         xp1[xi1] += xp1[xi1];
         xi1++;
      }
      zypre_BoxLoop0End();
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoop1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop1Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop1For(xi1)
      {
         xp1[xi1] += xp1[xi1];
      }
      zypre_BoxLoop1End(xi1);
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoop2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop2Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop2For(xi1, xi2)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2];
      }
      zypre_BoxLoop2End(xi1, xi2);
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop3 */
   time_index = hypre_InitializeTiming("BoxLoop3");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop3Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop3For(xi1, xi2, xi3)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
      }
      zypre_BoxLoop3End(xi1, xi2, xi3);
   }
   hypre_EndTiming(time_index);
   /* Time BoxLoop4 */
   time_index = hypre_InitializeTiming("BoxLoop4");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop4Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3,
                          x4_data_box, start, unit_stride, xi4);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop4For(xi1, xi2, xi3, xi4)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
      }
      zypre_BoxLoop4End(xi1, xi2, xi3, xi4);
   }
   hypre_EndTiming(time_index);
   hypre_PrintTiming("New BoxLoop times", hypre_MPI_COMM_WORLD);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();
   /*-----------------------------------------------------------
    * Finalize things
    *-----------------------------------------------------------*/
   hypre_BoxDestroy(x1_data_box);
   hypre_BoxDestroy(x2_data_box);
   hypre_BoxDestroy(x3_data_box);
   hypre_BoxDestroy(x4_data_box);
   hypre_TFree(xp1, HYPRE_MEMORY_HOST);
   hypre_TFree(xp2, HYPRE_MEMORY_HOST);
   hypre_TFree(xp3, HYPRE_MEMORY_HOST);
   hypre_TFree(xp4, HYPRE_MEMORY_HOST);
   /* Finalize MPI */
   hypre_MPI_Finalize();
   return (0);
}
|
GB_binop__rminus_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int8)
// A*D function (colscale): GB (_AxD__rminus_int8)
// D*A function (rowscale): GB (_DxB__rminus_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int8)
// C=scalar+B GB (_bind1st__rminus_int8)
// C=scalar+B' GB (_bind1st_tran__rminus_int8)
// C=A+scalar GB (_bind2nd__rminus_int8)
// C=A'+scalar GB (_bind2nd_tran__rminus_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
/* Type/operator configuration consumed by the shared GB_* templates below.
 * BUGFIX: GB_A_IS_PATTERN and GB_B_IS_PATTERN each had a stray trailing
 * backslash after the `0`, which line-spliced the following comment line
 * into the macro body; the backslashes are removed. */
#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: rminus is "reverse minus", z = y - x
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT8 || GxB_NO_RMINUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
/* C += A+B where C, A, and B are all dense; the loop body lives in the
 * shared template, specialized via the GB_* macros above. */
void GB (_Cdense_ewise3_accum__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where all three matrices are dense (no accumulation). */
void GB (_Cdense_ewise3_noaccum__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate a sparse matrix B into a dense matrix C, using the
 * precomputed task slicing of B. Returns GrB_NO_VALUE when this operator
 * instantiation is compile-time disabled. */
GrB_Info GB (_Cdense_accumB__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar b (passed as untyped GB_void*) into a dense
 * matrix C. Note the second `return (GrB_SUCCESS)` is unreachable -- an
 * artifact of the code generator; harmless. */
GrB_Info GB (_Cdense_accumb__rminus_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale the columns of A by the diagonal matrix D. */
GrB_Info GB (_AxD__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale the rows of B by the diagonal matrix D. */
GrB_Info GB (_DxB__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,         // optional mask; may be NULL
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,   // if true, use alpha/beta for absent entries
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    // alpha/beta are only read from the inputs in the eWiseUnion case;
    // presumably the template only uses them in that case as well -- the
    // template is not visible here, so they stay uninitialized otherwise
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,         // optional mask; may be NULL
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the emult_08 meta-template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,         // optional mask; may be NULL
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,          // if true, compute z = fmult(y,x) instead
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,         // sparse/hyper mask (required in this method)
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    // M pre-sliced into M_ntasks tasks for M_nthreads threads
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,         // optional mask; may be NULL
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx(k) = Bx(k) - x for every entry present in B (rminus with x bound first:
// note the operands are swapped relative to plain minus, so z = y - x).
GrB_Info GB (_bind1st__rminus_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    const int8_t *Bx = (const int8_t *) Bx_input ;
    const int8_t x = (*((const int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip positions not flagged present by the bitmap accessor
        if (!GBB (Bb, k)) continue ;
        int8_t bkj = GBX (Bx, k, false) ;
        Cx [k] = (bkj - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx(k) = y - Ax(k) for every entry present in A (rminus with y bound second:
// the operands are swapped relative to plain minus, so z = y - x).
GrB_Info GB (_bind2nd__rminus_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    const int8_t *Ax = (const int8_t *) Ax_input ;
    const int8_t y = (*((const int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip positions not flagged present by the bitmap accessor
        if (!GBB (Ab, k)) continue ;
        int8_t akj = GBX (Ax, k, false) ;
        Cx [k] = (y - akj) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Cast macro consumed by GB_unop_transpose.c: z = rminus(x,aij) = aij - x
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first
GrB_Info GB (_bind1st_tran__rminus_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,     // pointer to the bound scalar x (int8_t)
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Cast macro consumed by GB_unop_transpose.c: z = rminus(aij,y) = y - aij
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second
GrB_Info GB (_bind2nd_tran__rminus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // pointer to the bound scalar y (int8_t)
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
conv3x3s1_winograd64_neon4_AoA.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Winograd F(6x6,3x3) OUTPUT transform for a 3x3 stride-1 convolution.
// bottom_blob holds the 8x8 Winograd-domain tiles; this routine transforms
// each tile back to the spatial domain (6x6 output per tile), adds the
// per-channel bias, and writes into a freshly created bordered output.
// NOTE(review): `inch` is unused here; presumably kept for signature parity
// with the other winograd stages. The crop from top_blob_bordered back to
// top_blob appears to happen outside this function -- TODO confirm.
static void conv3x3s1_winograd64_neon4_AoA(const Mat& bottom_blob, Mat& top_blob, const Mat& _bias, const Option& opt,
    int outch, int inch, int outh, int outw)
{
    const float* bias = _bias;
    Mat top_blob_tm = bottom_blob;
    Mat top_blob_bordered = top_blob;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // const float otm[6][8] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
        // };
        // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
        // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#if __ARM_NEON
        // lane constants 4/8/16/32 used by vmla_lane against the otm table
        const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
        float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm/8 * h_tm/8;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p<outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);
            const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
            float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
            // tmp holds the half-transformed 6x8 tile (rows transformed,
            // columns still to go)
            float tmp[6][8];
            // tile
            for (int i=0; i<outh/6; i++)
            {
                for (int j=0; j<outw/6; j++)
                {
#if __ARM_NEON
                    // the 8 rows of this tile are interleaved across the
                    // transform matrix: row r of tile (i,j) lives at
                    // out0_tm.row(i*w_tm/8 + j + tiles*r), split 0..3 / 4..7
                    const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
                    const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles);
                    const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*2);
                    const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*3);
                    const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*4);
                    const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*5);
                    const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*6);
                    const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*7);
#if __aarch64__
                    // pass 1: transform the 8 columns, 4 rows at a time
                    for (int m=0; m+3<8; m+=4)
                    {
                        float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0);
                        float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4);
                        float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0);
                        float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4);
                        float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0);
                        float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4);
                        float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0);
                        float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4);
                        // 4x4 transpose via vtrn + vcombine
                        float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123);
                        float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567);
                        float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123);
                        float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567);
                        // no vswp intrinsic :(
                        float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0]));
                        float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1]));
                        float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), vget_high_f32(_output0_tm23_00221133.val[0]));
                        float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1]));
                        float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0]));
                        float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1]));
                        float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0]));
                        float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1]));
                        // butterfly sums/differences per the otm table above
                        float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
                        float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
                        float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
                        float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
                        float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
                        float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
                        float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
                        _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
                        _tmp0 = vaddq_f32(_tmp0, _tmp024b);
                        float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
                        _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
                        float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
                        // added twice on purpose: + _tmp024c * 2 (row 4 of otm)
                        _tmp4 = vaddq_f32(_tmp4, _tmp024c);
                        _tmp4 = vaddq_f32(_tmp4, _tmp024c);
                        vst1q_f32(&tmp[0][m], _tmp0);
                        vst1q_f32(&tmp[2][m], _tmp2);
                        vst1q_f32(&tmp[4][m], _tmp4);
                        float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
                        // added twice on purpose: + _tmp135b * 2 (row 1 of otm)
                        _tmp1 = vaddq_f32(_tmp1, _tmp135b);
                        _tmp1 = vaddq_f32(_tmp1, _tmp135b);
                        float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
                        _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
                        float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
                        _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
                        _tmp5 = vaddq_f32(_tmp5, _tmp135c);
                        vst1q_f32(&tmp[1][m], _tmp1);
                        vst1q_f32(&tmp[3][m], _tmp3);
                        vst1q_f32(&tmp[5][m], _tmp5);
                        output0_tm0_0 += out0_tm.w * tiles * 2*4;
                        output0_tm0_4 += out0_tm.w * tiles * 2*4;
                        output0_tm1_0 += out0_tm.w * tiles * 2*4;
                        output0_tm1_4 += out0_tm.w * tiles * 2*4;
                        output0_tm2_0 += out0_tm.w * tiles * 2*4;
                        output0_tm2_4 += out0_tm.w * tiles * 2*4;
                        output0_tm3_0 += out0_tm.w * tiles * 2*4;
                        output0_tm3_4 += out0_tm.w * tiles * 2*4;
                    }
                    // pass 2: transform the 6 rows of tmp, 2 at a time, add
                    // bias, and store the 6x6 spatial result
                    const float* t0 = tmp[0];
                    const float* t1 = tmp[1];
                    float* output0 = out0.row(i * 6) + j * 6;
                    float* output1 = output0 + outw;
                    for (int m=0; m+1<6; m+=2)
                    {
                        float32x4_t _t0_0123 = vld1q_f32(t0);
                        float32x4_t _t0_4567 = vld1q_f32(t0+4);
                        float32x4_t _t1_0123 = vld1q_f32(t1);
                        float32x4_t _t1_4567 = vld1q_f32(t1+4);
                        float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
                        float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
                        float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
                        float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
                        float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
                        float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
                        float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
                        float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
                        float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
                        float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
                        float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
                        float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
                        float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
                        float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
                        float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
                        float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
                        float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
                        _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
                        _output_0 = vadd_f32(_output_0, _tmp024b);
                        _output_0 = vadd_f32(_output_0, _bias0);
                        float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
                        _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
                        _output_2 = vadd_f32(_output_2, _bias0);
                        float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
                        // added twice on purpose: + _tmp024c * 2 (row 4 of otm)
                        _output_4 = vadd_f32(_output_4, _tmp024c);
                        _output_4 = vadd_f32(_output_4, _tmp024c);
                        _output_4 = vadd_f32(_output_4, _bias0);
                        output0[0] = vget_lane_f32(_output_0, 0);
                        output1[0] = vget_lane_f32(_output_0, 1);
                        output0[2] = vget_lane_f32(_output_2, 0);
                        output1[2] = vget_lane_f32(_output_2, 1);
                        output0[4] = vget_lane_f32(_output_4, 0);
                        output1[4] = vget_lane_f32(_output_4, 1);
                        float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
                        // added twice on purpose: + _tmp135b * 2 (row 1 of otm)
                        _output_1 = vadd_f32(_output_1, _tmp135b);
                        _output_1 = vadd_f32(_output_1, _tmp135b);
                        _output_1 = vadd_f32(_output_1, _bias0);
                        float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
                        _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
                        _output_3 = vadd_f32(_output_3, _bias0);
                        float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
                        _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
                        _output_5 = vadd_f32(_output_5, _tmp135c);
                        _output_5 = vadd_f32(_output_5, _bias0);
                        output0[1] = vget_lane_f32(_output_1, 0);
                        output1[1] = vget_lane_f32(_output_1, 1);
                        output0[3] = vget_lane_f32(_output_3, 0);
                        output1[3] = vget_lane_f32(_output_3, 1);
                        output0[5] = vget_lane_f32(_output_5, 0);
                        output1[5] = vget_lane_f32(_output_5, 1);
                        t0 += 8*2;
                        t1 += 8*2;
                        output0 += outw*2;
                        output1 += outw*2;
                    }
#else // __aarch64__
                    // armv7: same two-pass transform, hand-written NEON asm
                    // (two unrolled iterations per asm block)
                    float* t0 = tmp[0];
                    float* t1 = tmp[1];
                    // byte stride between successive transform rows
                    int step = out0_tm.w * tiles * 2*4 *4;
                    asm volatile(
                        // loop0
                        "vld1.f32 {d16-d17}, [%2], %21 \n"
                        "vld1.f32 {d18-d19}, [%3], %21 \n"
                        "vld1.f32 {d20-d21}, [%4], %21 \n"
                        "vld1.f32 {d22-d23}, [%5], %21 \n"
                        "vld1.f32 {d24-d25}, [%6], %21 \n"
                        "vld1.f32 {d26-d27}, [%7], %21 \n"
                        "vld1.f32 {d28-d29}, [%8], %21 \n"
                        "vld1.f32 {d30-d31}, [%9], %21 \n"
                        "vtrn.32 q8, q10 \n"
                        "vtrn.32 q9, q11 \n"
                        "vtrn.32 q12, q14 \n"
                        "vtrn.32 q13, q15 \n"
                        "vswp d17, d24 \n"
                        "vswp d19, d26 \n"
                        "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
                        "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
                        "vadd.f32 q2, q10, q12 \n"
                        "vsub.f32 q3, q10, q12 \n"
                        "vadd.f32 q4, q14, q9 \n"
                        "vsub.f32 q5, q14, q9 \n"
                        "vadd.f32 q6, q11, q13 \n"
                        "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
                        "vmov q9, q3 \n"
                        "vadd.f32 q8, q8, q2 \n"
                        "vmla.f32 q9, q7, %f20[0] \n"
                        "vmov q12, q2 \n"
                        "vmov q10, q2 \n"
                        "vmov q11, q3 \n"
                        "vmla.f32 q12, q4, %f20[0] \n"
                        "vadd.f32 q15, q15, q3 \n"
                        "vmla.f32 q8, q6, %f20[1] \n"
                        "vadd.f32 q9, q9, q5 \n"
                        "vmla.f32 q10, q4, %e20[0] \n"
                        "vmla.f32 q11, q5, %e20[1] \n"
                        "vadd.f32 q12, q12, q6 \n"
                        "vmla.f32 q15, q5, %f20[1] \n"
                        "vadd.f32 q8, q8, q4 \n"
                        "vadd.f32 q9, q9, q5 \n"
                        "vmla.f32 q10, q6, %e20[1] \n"
                        "vmla.f32 q11, q7, %e20[0] \n"
                        "vadd.f32 q12, q12, q6 \n"
                        "vadd.f32 q15, q15, q7 \n"
                        "vst1.f32 {d16-d17}, [%0] \n"
                        "add %0, %0, #64 \n"
                        "vst1.f32 {d18-d19}, [%1] \n"
                        "add %1, %1, #64 \n"
                        "vst1.f32 {d20-d21}, [%0] \n"
                        "add %0, %0, #64 \n"
                        "vst1.f32 {d22-d23}, [%1] \n"
                        "add %1, %1, #64 \n"
                        "vst1.f32 {d24-d25}, [%0] \n"
                        "sub %0, %0, #112 \n"
                        "vst1.f32 {d30-d31}, [%1] \n"
                        "sub %1, %1, #112 \n"
                        // loop1
                        "vld1.f32 {d16-d17}, [%2] \n"
                        "vld1.f32 {d18-d19}, [%3] \n"
                        "vld1.f32 {d20-d21}, [%4] \n"
                        "vld1.f32 {d22-d23}, [%5] \n"
                        "vld1.f32 {d24-d25}, [%6] \n"
                        "vld1.f32 {d26-d27}, [%7] \n"
                        "vld1.f32 {d28-d29}, [%8] \n"
                        "vld1.f32 {d30-d31}, [%9] \n"
                        "vtrn.32 q8, q10 \n"
                        "vtrn.32 q9, q11 \n"
                        "vtrn.32 q12, q14 \n"
                        "vtrn.32 q13, q15 \n"
                        "vswp d17, d24 \n"
                        "vswp d19, d26 \n"
                        "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
                        "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
                        "vadd.f32 q2, q10, q12 \n"
                        "vsub.f32 q3, q10, q12 \n"
                        "vadd.f32 q4, q14, q9 \n"
                        "vsub.f32 q5, q14, q9 \n"
                        "vadd.f32 q6, q11, q13 \n"
                        "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
                        "vmov q9, q3 \n"
                        "vadd.f32 q8, q8, q2 \n"
                        "vmla.f32 q9, q7, %f20[0] \n"
                        "vmov q12, q2 \n"
                        "vmov q10, q2 \n"
                        "vmov q11, q3 \n"
                        "vmla.f32 q12, q4, %f20[0] \n"
                        "vadd.f32 q15, q15, q3 \n"
                        "vmla.f32 q8, q6, %f20[1] \n"
                        "vadd.f32 q9, q9, q5 \n"
                        "vmla.f32 q10, q4, %e20[0] \n"
                        "vmla.f32 q11, q5, %e20[1] \n"
                        "vadd.f32 q12, q12, q6 \n"
                        "vmla.f32 q15, q5, %f20[1] \n"
                        "vadd.f32 q8, q8, q4 \n"
                        "vadd.f32 q9, q9, q5 \n"
                        "vmla.f32 q10, q6, %e20[1] \n"
                        "vmla.f32 q11, q7, %e20[0] \n"
                        "vadd.f32 q12, q12, q6 \n"
                        "vadd.f32 q15, q15, q7 \n"
                        "vst1.f32 {d16-d17}, [%0] \n"
                        "add %0, %0, #64 \n"
                        "vst1.f32 {d18-d19}, [%1] \n"
                        "add %1, %1, #64 \n"
                        "vst1.f32 {d20-d21}, [%0] \n"
                        "add %0, %0, #64 \n"
                        "vst1.f32 {d22-d23}, [%1] \n"
                        "add %1, %1, #64 \n"
                        "vst1.f32 {d24-d25}, [%0] \n"
                        "vst1.f32 {d30-d31}, [%1] \n"
                        : "=r"(t0), // %0
                        "=r"(t1), // %1
                        "=r"(output0_tm0_0), // %2
                        "=r"(output0_tm0_4), // %3
                        "=r"(output0_tm1_0), // %4
                        "=r"(output0_tm1_4), // %5
                        "=r"(output0_tm2_0), // %6
                        "=r"(output0_tm2_4), // %7
                        "=r"(output0_tm3_0), // %8
                        "=r"(output0_tm3_4) // %9
                        : "0"(t0),
                        "1"(t1),
                        "2"(output0_tm0_0),
                        "3"(output0_tm0_4),
                        "4"(output0_tm1_0),
                        "5"(output0_tm1_4),
                        "6"(output0_tm2_0),
                        "7"(output0_tm2_4),
                        "8"(output0_tm3_0),
                        "9"(output0_tm3_4),
                        "w"(_coeff), // %20
                        "r"(step) // %21
                        : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                    t0 = tmp[0];
                    t1 = tmp[1];
                    float* output0 = out0.row(i * 6) + j * 6;
                    float* output1 = output0 + outw;
                    // byte stride between output row pairs
                    int stepw = outw*2 * 4;
                    asm volatile(
                        // loop0
                        "vld1.f32 {d16-d19}, [%2] \n"
                        "vld1.f32 {d20-d23}, [%3] \n"
                        "add %2, %2, #64 \n"
                        "add %3, %3, #64 \n"
                        "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
                        "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
                        "vadd.f32 d4, d20, d17 \n"
                        "vsub.f32 d5, d20, d17 \n"
                        "vadd.f32 d6, d21, d18 \n"
                        "vsub.f32 d7, d21, d18 \n"
                        "vadd.f32 d8, d22, d19 \n"
                        "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
                        "vmov d20, d5 \n"
                        "vmov d18, d4 \n"
                        "vadd.f32 d16, d16, d4 \n"
                        "vmla.f32 d20, d9, %f8[0] \n"
                        "vmov d17, d4 \n"
                        "vmov d21, d5 \n"
                        "vmla.f32 d18, d6, %f8[0] \n"
                        "vadd.f32 d22, d23, d5 \n"
                        "vmla.f32 d16, d8, %f8[1] \n"
                        "vadd.f32 d20, d20, d7 \n"
                        "vmla.f32 d17, d6, %e8[0] \n"
                        "vmla.f32 d21, d7, %e8[1] \n"
                        "vadd.f32 d18, d18, d8 \n"
                        "vmla.f32 d22, d7, %f8[1] \n"
                        "vadd.f32 d16, d16, d6 \n"
                        "vadd.f32 d20, d20, d7 \n"
                        "vmla.f32 d17, d8, %e8[1] \n"
                        "vmla.f32 d21, d9, %e8[0] \n"
                        "vadd.f32 d18, d18, d8 \n"
                        "vadd.f32 d22, d22, d9 \n"
                        "vadd.f32 d16, d16, %P9 \n"// _bias0
                        "vadd.f32 d20, d20, %P9 \n"// _bias0
                        "vadd.f32 d17, d17, %P9 \n"// _bias0
                        "vadd.f32 d21, d21, %P9 \n"// _bias0
                        "vadd.f32 d18, d18, %P9 \n"// _bias0
                        "vadd.f32 d22, d22, %P9 \n"// _bias0
                        "vtrn.f32 q8, q10 \n"
                        "vtrn.f32 d18, d22 \n"
                        "vst1.f32 {d16-d18}, [%0], %10 \n"
                        "vst1.f32 {d20-d22}, [%1], %10 \n"
                        // loop1
                        "vld1.f32 {d16-d19}, [%2] \n"
                        "vld1.f32 {d20-d23}, [%3] \n"
                        "add %2, %2, #64 \n"
                        "add %3, %3, #64 \n"
                        "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
                        "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
                        "vadd.f32 d4, d20, d17 \n"
                        "vsub.f32 d5, d20, d17 \n"
                        "vadd.f32 d6, d21, d18 \n"
                        "vsub.f32 d7, d21, d18 \n"
                        "vadd.f32 d8, d22, d19 \n"
                        "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
                        "vmov d20, d5 \n"
                        "vmov d18, d4 \n"
                        "vadd.f32 d16, d16, d4 \n"
                        "vmla.f32 d20, d9, %f8[0] \n"
                        "vmov d17, d4 \n"
                        "vmov d21, d5 \n"
                        "vmla.f32 d18, d6, %f8[0] \n"
                        "vadd.f32 d22, d23, d5 \n"
                        "vmla.f32 d16, d8, %f8[1] \n"
                        "vadd.f32 d20, d20, d7 \n"
                        "vmla.f32 d17, d6, %e8[0] \n"
                        "vmla.f32 d21, d7, %e8[1] \n"
                        "vadd.f32 d18, d18, d8 \n"
                        "vmla.f32 d22, d7, %f8[1] \n"
                        "vadd.f32 d16, d16, d6 \n"
                        "vadd.f32 d20, d20, d7 \n"
                        "vmla.f32 d17, d8, %e8[1] \n"
                        "vmla.f32 d21, d9, %e8[0] \n"
                        "vadd.f32 d18, d18, d8 \n"
                        "vadd.f32 d22, d22, d9 \n"
                        "vadd.f32 d16, d16, %P9 \n"// _bias0
                        "vadd.f32 d20, d20, %P9 \n"// _bias0
                        "vadd.f32 d17, d17, %P9 \n"// _bias0
                        "vadd.f32 d21, d21, %P9 \n"// _bias0
                        "vadd.f32 d18, d18, %P9 \n"// _bias0
                        "vadd.f32 d22, d22, %P9 \n"// _bias0
                        "vtrn.f32 q8, q10 \n"
                        "vtrn.f32 d18, d22 \n"
                        "vst1.f32 {d16-d18}, [%0], %10 \n"
                        "vst1.f32 {d20-d22}, [%1], %10 \n"
                        // loop2
                        "vld1.f32 {d16-d19}, [%2] \n"
                        "vld1.f32 {d20-d23}, [%3] \n"
                        "add %2, %2, #64 \n"
                        "add %3, %3, #64 \n"
                        "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
                        "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
                        "vadd.f32 d4, d20, d17 \n"
                        "vsub.f32 d5, d20, d17 \n"
                        "vadd.f32 d6, d21, d18 \n"
                        "vsub.f32 d7, d21, d18 \n"
                        "vadd.f32 d8, d22, d19 \n"
                        "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
                        "vmov d20, d5 \n"
                        "vmov d18, d4 \n"
                        "vadd.f32 d16, d16, d4 \n"
                        "vmla.f32 d20, d9, %f8[0] \n"
                        "vmov d17, d4 \n"
                        "vmov d21, d5 \n"
                        "vmla.f32 d18, d6, %f8[0] \n"
                        "vadd.f32 d22, d23, d5 \n"
                        "vmla.f32 d16, d8, %f8[1] \n"
                        "vadd.f32 d20, d20, d7 \n"
                        "vmla.f32 d17, d6, %e8[0] \n"
                        "vmla.f32 d21, d7, %e8[1] \n"
                        "vadd.f32 d18, d18, d8 \n"
                        "vmla.f32 d22, d7, %f8[1] \n"
                        "vadd.f32 d16, d16, d6 \n"
                        "vadd.f32 d20, d20, d7 \n"
                        "vmla.f32 d17, d8, %e8[1] \n"
                        "vmla.f32 d21, d9, %e8[0] \n"
                        "vadd.f32 d18, d18, d8 \n"
                        "vadd.f32 d22, d22, d9 \n"
                        "vadd.f32 d16, d16, %P9 \n"// _bias0
                        "vadd.f32 d20, d20, %P9 \n"// _bias0
                        "vadd.f32 d17, d17, %P9 \n"// _bias0
                        "vadd.f32 d21, d21, %P9 \n"// _bias0
                        "vadd.f32 d18, d18, %P9 \n"// _bias0
                        "vadd.f32 d22, d22, %P9 \n"// _bias0
                        "vtrn.f32 q8, q10 \n"
                        "vtrn.f32 d18, d22 \n"
                        "vst1.f32 {d16-d18}, [%0], %10 \n"
                        "vst1.f32 {d20-d22}, [%1], %10 \n"
                        : "=r"(output0), // %0
                        "=r"(output1), // %1
                        "=r"(t0), // %2
                        "=r"(t1) // %3
                        : "0"(output0),
                        "1"(output1),
                        "2"(t0),
                        "3"(t1),
                        "w"(_coeff), // %8
                        "w"(_bias0), // %9
                        "r"(stepw) // %10
                        : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
#endif // __aarch64__
#else
                    // scalar reference path (no NEON): same two-pass transform
                    const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
                    const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles);
                    for (int m=0; m<8; m++)
                    {
                        float tmp024a = output0_tm_0[1] + output0_tm_0[2];
                        float tmp135a = output0_tm_0[1] - output0_tm_0[2];
                        float tmp024b = output0_tm_0[3] + output0_tm_4[0];
                        float tmp135b = output0_tm_0[3] - output0_tm_4[0];
                        float tmp024c = output0_tm_4[1] + output0_tm_4[2];
                        float tmp135c = output0_tm_4[1] - output0_tm_4[2];
                        tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
                        tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
                        tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c;
                        output0_tm_0 += out0_tm.w * tiles * 2;
                        output0_tm_4 += out0_tm.w * tiles * 2;
                    }
                    float* output0 = out0.row(i * 6) + j * 6;
                    for (int m=0; m<6; m++)
                    {
                        const float* tmp0 = tmp[m];
                        float tmp024a = tmp0[1] + tmp0[2];
                        float tmp135a = tmp0[1] - tmp0[2];
                        float tmp024b = tmp0[3] + tmp0[4];
                        float tmp135b = tmp0[3] - tmp0[4];
                        float tmp024c = tmp0[5] + tmp0[6];
                        float tmp135c = tmp0[5] - tmp0[6];
                        output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
                        output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                        output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
                        output0 += outw;
                    }
#endif // __ARM_NEON
                }
            }
        }
    }
}
}
|
raceCond1.c | // a program with a race condition
// online source: http://sourceforge.net/apps/trac/cppcheck/ticket/663
#include <omp.h>
// Deliberate data race: each iteration reads a[i-1], which is written by a
// different iteration -- a loop-carried dependence. Parallelizing it with
// omp parallel for is the race this test case exists to exhibit; do NOT
// "fix" it, race-detection tools are expected to flag it.
int main()
{
    int a[50],i;
    a[0] = 1;
    #pragma omp parallel for
    for(i=1; i<50; i++)
    {
        // a[i-1] may not yet be written by the thread that owns it
        a[i] = i + a[i-1];
    }
}
|
GB_subassign_03.c | //------------------------------------------------------------------------------
// GB_subassign_03: C(I,J) += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 03: C(I,J) += scalar ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: constructed
// C is not bitmap: use GB_bitmap_assign instead
#include "GB_subassign_methods.h"
// Method 03: C(I,J) += scalar, using the extracted pattern S = C(I,J).
// No mask, no C_replace; accum is present; C must not be bitmap.
GrB_Info GB_subassign_03
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,             // row index list, interpreted per Ikind
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,             // column index list, interpreted per Jkind
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_BinaryOp accum,       // accumulator applied to existing entries
    const void *scalar,             // scalar of type atype to accumulate
    const GrB_Type atype,
    GB_Context Context
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    ASSERT (!GB_IS_BITMAP (C)) ;
    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------
    GB_EMPTY_TASKLIST ;
    GB_CLEAR_STATIC_HEADER (S, &S_header) ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------
    GB_GET_C ;      // C must not be bitmap
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;
    //--------------------------------------------------------------------------
    // Method 03: C(I,J) += scalar ; using S
    //--------------------------------------------------------------------------
    // Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.
    // Entries in S are found and the corresponding entry in C replaced with
    // the scalar.
    // Method 01 and Method 03 are very similar.
    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------
    GB_SUBASSIGN_IXJ_SLICE ;
    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------
        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------
        for (int64_t j = kfirst ; j <= klast ; j++)
        {
            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------
            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    // insertion deferred to phase 2; just count it here
                    task_pending++ ;
                }
                else
                {
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =C+A ): apply accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_withaccum_C_A_1_scalar ;
                    GB_NEXT (S) ;
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }
    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------
    GB_PENDING_CUMSUM ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------
        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------
        for (int64_t j = kfirst ; j <= klast ; j++)
        {
            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------
            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    // entry was already handled in phase 1; just advance S
                    GB_NEXT (S) ;
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }
    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------
    GB_SUBASSIGN_WRAPUP ;
}
|
try.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "omp.h"
#define N 1499336 //33554432 // 2 ^ 25
/*
 * Parallel inclusive prefix sum (scan) of input_array[0..size-1] into out,
 * using an OpenMP team of 3 threads:
 *   1) each thread scans its static chunk locally (out holds per-chunk scans),
 *   2) a Hillis-Steele style scan over the per-thread chunk totals (pp)
 *      turns them into inclusive totals of all chunks up to each thread,
 *   3) each thread adds the total of the preceding chunks to its chunk.
 * Step 3 relies on both "omp for schedule(static)" loops having identical
 * iteration counts, so each thread receives the same chunk both times.
 */
void comp(long long int *input_array, long long int *out, unsigned long size)
{
long long int nthr,*tt,*pp, *x = out;
#pragma omp parallel num_threads(3)
{
long long int i;
#pragma omp single
{
/* one thread allocates the shared scratch; the implicit barrier at the end
   of "single" publishes nthr/tt/pp to the whole team.
   NOTE(review): malloc results are not checked - TODO add OOM handling */
nthr = omp_get_num_threads();
printf("Nthr - %lld\n",nthr);
tt = malloc(sizeof(long long int)*nthr);
pp = malloc(sizeof(long long int)*nthr);
}
long long int tid = omp_get_thread_num();
long long int sum = 0;
/* step 1: local inclusive scan of this thread's static chunk */
#pragma omp for schedule(static)
for(i=0; i<size; i++) {
// printf("Tid - %lld, i = %lld\n",tid,i);
sum += input_array[i];
x[i] = sum;
}
/* pp[tid] = total of this thread's chunk */
pp[tid] = sum;
#pragma omp barrier
// printf("tid- %lld,sum - %lld\n",tid,sum);
/* step 2: O(log nthr) scan over pp; tt is the double buffer.  Index 0 is
   never written (pp[0] is already final), hence the memcpy from offset 1.
   The barrier orders the tt writes before the single copy; the implicit
   barrier after "single" orders the copy before the next round's reads. */
for(i=1; i<nthr; i*=2) {
if(tid>=i){
tt[tid] = pp[tid] + pp[tid-i];
}
#pragma omp barrier
#pragma omp single
memcpy(pp + 1, tt + 1, sizeof(long long int) * (nthr - 1));
}
#pragma omp single
{
long long int k;
for(k=0;k<nthr;k++){
// printf("tid - %lld , tt - %lld\n",k,pp[k]);
}
}
/* step 3: same static chunking as step 1; thread tid>=1 offsets its chunk
   by the combined total of all preceding chunks, pp[tid-1] */
#pragma omp for schedule(static)
for(i=0; i<size; i++) {
if(tid>=1){
x[i] += pp[tid-1];
}
// printf("i - %lld,sum - %lld \n",i,x[i]);
}
}
free(tt);
free(pp);
}
/*
 * Driver: fills input_array with 1..N, runs the parallel prefix sum,
 * times it with omp_get_wtime(), and verifies every prefix against the
 * closed-form triangular number (i+1)*(i+2)/2.
 * Returns 0 on success, 1 on allocation failure (the original dereferenced
 * unchecked malloc results).
 */
int main(void) {
    long long int *input_array, *pprefixsum;
    /* idiomatic C: no cast on malloc, size tied to the element type */
    input_array = malloc(sizeof(long long int) * N);
    pprefixsum = malloc(sizeof(long long int) * N);
    if (input_array == NULL || pprefixsum == NULL) {
        fprintf(stderr, "allocation of %d elements failed\n", N);
        free(input_array);   /* free(NULL) is a no-op */
        free(pprefixsum);
        return 1;
    }
    for (long long int i = 0; i < N; i++) input_array[i] = i + 1;
    // for(long long int i=0; i<N; i++) printf("%d ", input_array[i]); printf("\n");
    double start = omp_get_wtime();
    comp(input_array, pprefixsum, N);
    double end = omp_get_wtime();
    printf("Time Taken - %f s.\n", end - start);
    /* prefix sum of 1..(i+1) is (i+1)(i+2)/2; fits in long long for this N */
    for (long long int i = 0; i < N; i++) {
        if (pprefixsum[i] != (i + 1) * (i + 2) / 2) {
            printf("EEEEEEEEError");
            break;
        }
    }
    free(input_array);
    free(pprefixsum);
    return 0;
}
GB_binop__iseq_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fp64)
// A*D function (colscale): GB (_AxD__iseq_fp64)
// D*A function (rowscale): GB (_DxB__iseq_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fp64)
// C=scalar+B GB (_bind1st__iseq_fp64)
// C=scalar+B' GB (_bind1st_tran__iseq_fp64)
// C=A+scalar GB (_bind2nd__iseq_fp64)
// C=A'+scalar GB (_bind2nd_tran__iseq_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FP64 || GxB_NO_ISEQ_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Dead code: ISEQ is not one of the ops above, so the generator emitted this
// dense-ewise3-accum stub disabled under "#if 0"; kept for template symmetry.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the kernel body is supplied by
// the shared template, specialized through the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C.  B_ek_slicing partitions B's
// entries into B_ntasks tasks for B_nthreads threads (computed by caller).
GrB_Info GB (_Cdense_accumB__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this op/type combination was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into dense C.
GrB_Info GB (_Cdense_accumb__iseq_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable duplicate return, kept as emitted by the generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
GrB_Info GB (_AxD__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its values are computed here
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
GrB_Info GB (_DxB__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  The TaskList/C_to_* arrays are
// the parallel schedule precomputed by the caller; the *_ek_slicing
// workspaces are declared here and freed by GB_FREE_WORKSPACE.
GrB_Info GB (_AaddB__iseq_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
// eWiseUnion substitutes alpha/beta for missing entries of A/B; the
// type-erased scalars are unpacked only when that mode is requested
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__iseq_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP selects at compile time whether a runtime
// flipxy test is needed (ISEQ is commutative, so the #else branch is used).
GrB_Info GB (_AemultB_02__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__iseq_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand and apply
// z = (x == bij) across all bnz entries of B.
GrB_Info GB (_bind1st__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // only entries present in B's bitmap are computed
        if (GBB (Bb, p))
        {
            double bij = GBX (Bx, p, false) ;
            Cx [p] = (x == bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand and apply
// z = (aij == y) across all anz entries of A.
GrB_Info GB (_bind2nd__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // only entries present in A's bitmap are computed
        if (GBB (Ab, p))
        {
            double aij = GBX (Ax, p, false) ;
            Cx [p] = (aij == y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x == aij) via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__iseq_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this specialization
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply z = (aij == y) via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
parallel_for.h | /*
Copyright (c) 2013, Taiga Nomi and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#pragma once
#include <cassert>
#include <cstdio>
#include <limits>
#include <string>
#include <type_traits>
#include <vector>
#include "aligned_allocator.h"
#include "nn_error.h"
#include "tiny_dnn/config.h"
#ifdef CNN_USE_TBB
#ifndef NOMINMAX
#define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h
#endif
#include <tbb/task_group.h>
#include <tbb/tbb.h>
#endif
#if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD)
#include <future>
#include <thread>
#endif
#if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD)
#include <dispatch/dispatch.h>
#endif
namespace tiny_dnn {
#ifdef CNN_USE_TBB
static tbb::task_scheduler_init tbbScheduler(
tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred);
typedef tbb::blocked_range<size_t> blocked_range;
// TBB backend: run f over [begin, end) in parallel, in chunks described by
// blocked_range.  If the range is not larger than grainsize, a grain of 1
// is used so the whole range still executes.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
assert(end >= begin);
tbb::parallel_for(
blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f);
}
// Serial ("execute-inline") variant for the TBB build: call f once with the
// whole range.  The 100 is only the grainsize carried inside blocked_range.
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
f(blocked_range(begin, end, 100));
}
#else
// Minimal stand-in for tbb::blocked_range when TBB is absent: a half-open
// index interval [begin, end) over size_t, with an int convenience ctor.
struct blocked_range {
  typedef size_t const_iterator;

  blocked_range(size_t first, size_t last) : begin_(first), end_(last) {}
  blocked_range(int first, int last)
    : begin_(static_cast<size_t>(first)), end_(static_cast<size_t>(last)) {}

  const_iterator begin() const { return begin_; }
  const_iterator end() const { return end_; }

 private:
  size_t begin_;  // first index of the interval
  size_t end_;    // one past the last index
};
// Serial fallback: invoke f exactly once over the entire [begin, end) range.
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
  f(blocked_range(begin, end));
}
#if defined(CNN_USE_OMP)
template <typename Func>
void parallel_for(size_t begin,
size_t end,
const Func &f,
size_t /*grainsize*/) {
assert(end >= begin);
#pragma omp parallel for
for (size_t i = begin; i < end; ++i) f(blocked_range(i, i + 1));
}
#elif defined(CNN_USE_GCD)
// Grand Central Dispatch backend (Apple): split [begin, end) into blocks of
// `grainsize` (clamped to at least 1 whole-range block when the range is
// smaller than the grain) and dispatch each block on the default queue.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
assert(end >= begin);
size_t count = end - begin;
size_t blockSize = grainsize;
if (count < blockSize || blockSize == 0) {
blockSize = 1;
}
size_t blockCount = (count + blockSize - 1) / blockSize;
assert(blockCount > 0);
// ^(size_t block){...} is an Objective-C/Clang block literal
dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
^(size_t block) {
size_t blockStart = block * blockSize;
size_t blockEnd = blockStart + blockSize;
if (blockEnd > end) {
blockEnd = end;
}
assert(blockStart < blockEnd);
f(blocked_range(blockStart, blockEnd));
});
}
#elif defined(CNN_SINGLE_THREAD)
// CNN_SINGLE_THREAD build: parallel_for degrades to the serial variant.
template <typename Func>
void parallel_for(size_t begin,
size_t end,
const Func &f,
size_t /*grainsize*/) {
xparallel_for(begin, end, f);
}
#else
// std::async backend: split [begin, end) into up to nthreads contiguous
// blocks, launch each on its own async task, and wait for all of them.
// grainsize is ignored; the block size is ceil(range / nthreads).
template <typename Func>
void parallel_for(size_t begin,
                  size_t end,
                  const Func &f,
                  size_t /*grainsize*/) {
  assert(end >= begin);
  // FIX(review): hardware_concurrency() may legally return 0, in which case
  // the original divided by zero computing blockSize.  Fall back to 1 thread.
  size_t nthreads = std::thread::hardware_concurrency();
  if (nthreads == 0) nthreads = 1;
  size_t blockSize = (end - begin) / nthreads;
  if (blockSize * nthreads < end - begin) blockSize++;  // ceil division
  std::vector<std::future<void> > futures;
  size_t blockBegin = begin;
  size_t blockEnd = blockBegin + blockSize;
  if (blockEnd > end) blockEnd = end;
  for (size_t i = 0; i < nthreads; i++) {
    // capture the block bounds by value; f by reference (outlives the tasks)
    futures.push_back(
      std::move(std::async(std::launch::async, [blockBegin, blockEnd, &f] {
        f(blocked_range(blockBegin, blockEnd));
      })));
    blockBegin += blockSize;
    blockEnd = blockBegin + blockSize;
    if (blockBegin >= end) break;
    if (blockEnd > end) blockEnd = end;
  }
  for (auto &future : futures) future.wait();
}
#endif
#endif // CNN_USE_TBB
// True iff `value` survives a round-trip conversion through T unchanged,
// i.e. T can exactly represent the given U value.
template <typename T, typename U>
bool value_representation(U const &value) {
  const T narrowed = static_cast<T>(value);
  return static_cast<U>(narrowed) == value;
}
// Dispatch overload selected for unsigned T (std::true_type tag): only
// parallelize when `end` survives the round trip through size_t; otherwise
// the cast below would mangle the bound, so fall back to the serial path.
template <typename T, typename Func>
inline void for_(std::true_type,
bool parallelize,
size_t begin,
T end,
Func f,
int grainsize = 100) {
parallelize = parallelize && value_representation<size_t>(end);
parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize)
: xparallel_for(begin, static_cast<size_t>(end), f);
}
// Dispatch overload selected for signed T (std::false_type tag).
// NOTE(review): a negative `end` cast to size_t would wrap to a huge bound
// in the parallel branch - presumably callers never pass negative sizes;
// verify at call sites.
template <typename T, typename Func>
inline void for_(std::false_type,
bool parallelize,
size_t begin,
T end,
Func f,
int grainsize = 100) {
parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize)
: xparallel_for(begin, end, f);
}
// Public entry: dispatch on the signedness of T to one of the overloads
// above.  Note grainsize narrows from size_t to the overloads' int parameter.
template <typename T, typename Func>
inline void for_(
bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) {
static_assert(std::is_integral<T>::value, "end must be integral type");
for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f,
grainsize);
}
// Call f(i) for each i in [0, size), optionally in parallel.  The lambda
// receives a blocked_range and fans out the per-index calls.
// NOTE(review): with CNN_USE_OMP, the inner "omp parallel for" runs inside
// ranges already distributed by parallel_for - presumably relying on the
// OpenMP backend handing out size-1 ranges; confirm no oversubscription.
template <typename T, typename Func>
inline void for_i(bool parallelize, T size, Func f, size_t grainsize = 100) {
#ifdef CNN_SINGLE_THREAD
for (size_t i = 0; i < size; ++i) {
f(i);
}
#else // #ifdef CNN_SINGLE_THREAD
for_(parallelize, 0, size,
[&](const blocked_range &r) {
#ifdef CNN_USE_OMP
#pragma omp parallel for
#endif
for (size_t i = r.begin(); i < r.end(); i++) {
f(i);
}
},
grainsize);
#endif // #ifdef CNN_SINGLE_THREAD
}
// Convenience wrapper: for_i with parallelization enabled.
template <typename T, typename Func>
inline void for_i(T size, Func f, size_t grainsize = 100) {
for_i(true, size, f, grainsize);
}
} // namespace tiny_dnn
|
my_functions.h | #ifndef _MY_FUNCTIONS
#define _MY_FUNCTIONS
#endif
#ifndef PI
#define PI 3.14159265358979323846
#endif
// New transpose function that deals with cache miss problem
// Exact implementation is modified from
// http://stackoverflow.com/questions/5200338/a-cache-efficient-matrix-transpose-program
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
// portability shim: __assume_aligned is an ICC-specific hint; no-op elsewhere
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * Cache-blocked transpose: dst = src^T for an n x n row-major matrix,
 * processing 64 source rows (= 64 destination columns) at a time.
 *
 * FIX(review): the original inner loop always ran b over [0, block),
 * ignoring the block offset i, so columns [block, n - n%block) of dst were
 * never written; and for n < block the unsigned bound n-block underflowed,
 * scanning far out of bounds.  Every element is now written exactly once.
 */
void transpose_cache(double* restrict dst, const double* restrict src, size_t n){
    __assume_aligned(dst, 64);
    __assume_aligned(src, 64);
    size_t block = 64;
    /* full blocks: i + block <= n avoids size_t underflow when n < block */
    for (size_t i = 0; i + block <= n; i += block)
        for (size_t j = 0; j < n; ++j)
            for (size_t b = i; b < i + block; ++b)
                dst[j*n+b] = src[b*n+j];
    /* remainder columns [n - n%block, n) */
    size_t start = n - n % block;
    for (size_t j = 0; j < n; ++j)
        for (size_t b = start; b < n; ++b)
            dst[j*n+b] = src[b*n+j];
}
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * Elementwise u -= v over an n x n matrix (n*n contiguous doubles),
 * cache-blocked in runs of 64.
 *
 * FIX(review): the original inner loop ignored the block offset i, so
 * elements [block, n - n%block) of each row were skipped while [0, block)
 * was subtracted once per outer iteration (multiple subtractions when
 * n >= 2*block); n < block also underflowed the unsigned bound n-block.
 * Each element is now updated exactly once.
 */
void vec_subtract(double* restrict u, const double* restrict v, size_t n){
    __assume_aligned(u, 64);
    __assume_aligned(v, 64);
    size_t block = 64;
    for (size_t i = 0; i + block <= n; i += block)
        for (size_t j = 0; j < n; ++j)
            for (size_t b = i; b < i + block; ++b)
                u[j*n+b] -= v[j*n+b];
    size_t start = n - n % block;
    for (size_t j = 0; j < n; ++j)
        for (size_t b = start; b < n; ++b)
            u[j*n+b] -= v[j*n+b];
}
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * u += v^T for n x n row-major matrices, cache-blocked in runs of 64.
 *
 * FIX(review): as with the sibling kernels, the original inner loop ran b
 * over [0, block) regardless of the block offset i, skipping the middle
 * columns, adding the first block once per outer iteration, and
 * underflowing n-block for n < block.  Each (j, b) pair is now visited
 * exactly once.
 */
void vec_transpose_add(double* restrict u, const double* restrict v, size_t n){
    __assume_aligned(u, 64);
    __assume_aligned(v, 64);
    size_t block = 64;
    for (size_t i = 0; i + block <= n; i += block)
        for (size_t j = 0; j < n; ++j)
            for (size_t b = i; b < i + block; ++b)
                u[b*n+j] += v[j*n+b];
    size_t start = n - n % block;
    for (size_t j = 0; j < n; ++j)
        for (size_t b = start; b < n; ++b)
            u[b*n+j] += v[j*n+b];
}
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * Elementwise u -= 0.5*v over an n x n matrix, cache-blocked in runs of 64.
 *
 * FIX(review): same blocking bug as the sibling kernels (offset i ignored,
 * middle columns skipped, first block updated repeatedly, size_t underflow
 * for n < block); each element is now updated exactly once.
 */
void vec_subtract_half(double* restrict u, const double* restrict v, size_t n){
    __assume_aligned(u, 64);
    __assume_aligned(v, 64);
    size_t block = 64;
    for (size_t i = 0; i + block <= n; i += block)
        for (size_t j = 0; j < n; ++j)
            for (size_t b = i; b < i + block; ++b)
                u[j*n+b] -= 0.5*v[j*n+b];
    size_t start = n - n % block;
    for (size_t j = 0; j < n; ++j)
        for (size_t b = start; b < n; ++b)
            u[j*n+b] -= 0.5*v[j*n+b];
}
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * In-place tridiagonal multiply u = T*u where T has zero first and last rows
 * and interior rows [.. a b a ..]:
 *   0 0 0 0 0 0
 *   a b a 0 0 0
 *   0 a b a 0 0
 *   ......
 *   0 0 0 a b a
 *   0 0 0 0 0 0
 * u_temp is caller-provided scratch of length n (gets a snapshot of u so
 * every update reads the original values).
 *
 * FIX(review): declared static inline.  A plain C99 "inline" definition in
 * a header emits no external definition, so any call the compiler chooses
 * not to inline fails to link; static inline is the standard header idiom
 * and is backward compatible for includers.
 */
static inline
void apply_tri_special(double* restrict u, double* restrict u_temp, const double a, const double b, const int n){
    __assume_aligned(u, 64);
    __assume_aligned(u_temp, 64);
    memcpy(u_temp, u, n*sizeof(double));  /* snapshot before overwriting */
    int N = n - 1;
    for (int i = 1; i < N; i++)
        u[i] = a*(u_temp[i-1]+u_temp[i+1]) + b * u_temp[i];
    /* first and last rows of T are identically zero */
    u[0] = 0.0;
    u[N] = 0.0;
}
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * In-place tridiagonal multiply u = T*u where T is identity on the boundary
 * rows and (I + tridiag) in the interior:
 *   1 0 0 0 0 0
 *   a b+1 a 0 0 0
 *   0 a b+1 a 0 0
 *   ......
 *   0 0 0 a b+1 a
 *   0 0 0 0 0 1
 * Implemented as u[i] += a*(u[i-1]+u[i+1]) + b*u[i] on the interior, with
 * the boundary entries left untouched (identity rows).  u_temp is
 * caller-provided scratch of length n.
 *
 * FIX(review): declared static inline (see apply_tri_special: plain C99
 * "inline" in a header emits no external definition and can fail to link).
 */
static inline
void apply_tri_special_plus(double* restrict u, double* restrict u_temp, const double a, const double b, const int n){
    __assume_aligned(u, 64);
    __assume_aligned(u_temp, 64);
    memcpy(u_temp, u, n*sizeof(double));  /* snapshot before overwriting */
    int N = n-1;
    for (int i = 1; i < N; i++){
        u[i] += a*(u_temp[i-1]+u_temp[i+1]) + b * u_temp[i];
    }
}
void solve_tri_special(double* restrict x, const double a, const double b, const int n){
//void solve_tri_special(double* restrict x, double* restrict cprime, const double a, const double b, const int n){
/*
This solve is designed to solve a tridiagonal linear system using thomas' algorithm with the following structure:
1 0 0 0 0 0
-a 1-b -a 0 0 0
0 -a 1-b -a 0 0
......
0 0 0 -a 1-b -a
0 0 0 0 0 1
There is little room for generalizing its application but it should yield great performance boost
x is both the right-hand side on entry and the solution on exit.  The
identity boundary rows mean x[0] and x[n-1] act as Dirichlet values.
*/
// Allocate scratch space.
// NOTE(review): _mm_malloc result is not checked - TODO handle allocation failure
//double* restrict cprime __attribute__((aligned(64))) = (double*) _mm_malloc(sizeof(double) * n, 64);
double* cprime = (double*) _mm_malloc(sizeof(double) * n, 64);
// cprime[0] = 0 encodes the identity first row (no coupling from row 0)
cprime[0] = 0;
double astar = -a;
double bstar = 1-b;
double m;
int N = n-1;
// forward sweep over interior rows 1..N-1 (boundary rows are identity)
for (int i = 1; i < N; i++) {
m = 1.0 / (bstar - astar * cprime[i - 1]);
cprime[i] = astar * m;
x[i] = (x[i] - astar * x[i - 1]) * m; // I'm not sure about this step, might step into the last one
}
// cprime[N] = 0 encodes the identity last row, so x[N] stays fixed below
cprime[N] = 0;
// back substitution from N-1 down to 0; cprime[0] = 0 makes the i = 0
// update a no-op, so the boundary value x[0] is preserved
for (int i = N; i-- > 0; )
x[i] = x[i] - cprime[i] * x[i + 1];
_mm_free(cprime);
}
/*
 * One ADI-style relaxation sweep: applies the half-step tridiagonal
 * operators row-wise (on u) and column-wise (on the transpose u_t),
 * forms the residual-like right-hand side, then performs two tridiagonal
 * solves (row direction, then column direction) in parallel.
 * `scratch` is length-n workspace reused by the apply_tri_* helpers; the
 * commented-out regions are earlier serial/alternative schedulings kept
 * for reference.
 * NOTE(review): _mm_malloc results are unchecked - TODO handle OOM.
 */
void relaxOperation(double * restrict u, const double * restrict fstar, double* scratch, double a, double b, int n){
//double* restrict ustar __attribute__((aligned(64))) = (double*) _mm_malloc(n*n*sizeof(double),64);
//double* restrict u_t __attribute__((aligned(64))) = (double*) _mm_malloc(n*n*sizeof(double),64);
double* ustar = (double*) _mm_malloc(n*n*sizeof(double),64);
double* u_t = (double*) _mm_malloc(n*n*sizeof(double),64);
transpose_cache(u_t, u, n);
// keep two copies and then do it.
for (int i = 1; i < n-1; i++) {
apply_tri_special_plus(u+i*n, scratch, a/2, b/2, n);
}
for (int i = 1; i < n-1; i++) {
apply_tri_special(u_t+i*n, scratch, a, b, n);
}
/* zero the first and last rows of the transposed copy (boundary columns) */
memset(u_t, 0.0, n*sizeof(double));
memset(u_t+(n-1)*n, 0.0, n*sizeof(double));
vec_subtract(u, fstar, n);
vec_transpose_add(u, u_t, n);
// int j;
//#ifdef _OPENMP
//#pragma omp parallel for private(j) //schedule(dynamic) default(shared)
//#endif
// for (j = 1; j < n-1; j++) {
// solve_tri_special(u+j*n, a/2, b/2, n);
// }
// transpose_cache(ustar, u, n);
// vec_subtract_half(ustar, u_t, n);
//#ifdef _OPENMP
//#pragma omp parallel for private(j)
//#endif
// for (j = 1; j < n-1; j++) {
// solve_tri_special(ustar+j*n, a/2, b/2, n);
// }
int j;
#pragma omp parallel
{
/* first half-step: row-wise tridiagonal solves */
#pragma omp for private(j) schedule(dynamic, 8)
#pragma vector aligned
for (j = 1; j < n-1; j++) {
solve_tri_special(u+j*n, a/2, b/2, n);
}
/* serial middle phase: transpose and adjust the RHS for the second sweep */
#pragma omp single
{
transpose_cache(ustar, u, n);
vec_subtract_half(ustar, u_t, n);
}
/* second half-step: column-wise solves (rows of the transpose) */
#pragma omp for private(j) schedule(dynamic, 8)
#pragma vector aligned
for (j = 1; j < n-1; j++) {
solve_tri_special(ustar+j*n, a/2, b/2, n);
}
}
transpose_cache(u, ustar, n);
_mm_free(ustar);
_mm_free(u_t);
}
#if !defined(__INTEL_COMPILER) && !defined(__assume_aligned)
#define __assume_aligned(p, a) ((void)0)
#endif
/*
 * Infinity norm (max |u[i]|) of an n x n matrix stored contiguously,
 * cache-blocked in runs of 64.
 *
 * FIX(review): the original blocked loops scanned b over [0, block)
 * regardless of the block offset i, so elements in columns
 * [block, n - n%block) were never examined and the returned maximum could
 * be wrong; for n < block the unsigned bound n-block underflowed, scanning
 * out of bounds.  Every element is now visited exactly once.  The seed
 * u[0] is safe because |u[0]| is also compared inside the loop.
 */
double normInf_cache(const double* restrict u, const size_t n){
    __assume_aligned(u, 64);
    double max = u[0];
    size_t block = 64;
    for (size_t i = 0; i + block <= n; i += block)
        for (size_t j = 0; j < n; ++j)
            for (size_t b = i; b < i + block; ++b)
                max = (max < fabs(u[j*n+b])) ? fabs(u[j*n+b]) : max;
    size_t start = n - n % block;
    for (size_t j = 0; j < n; ++j)
        for (size_t b = start; b < n; ++b)
            max = (max < fabs(u[j*n+b])) ? fabs(u[j*n+b]) : max;
    return max;
}
// Analytic test field: returns -cos(2*pi*wx*x)*cos(2*pi*wy*y) scaled by the
// reciprocal of ax*(2*pi*wx)^2 + ay*(2*pi*wy)^2, i.e. the exact solution of
// the anisotropic Poisson problem whose forcing is the cosine product.
double test_init(const double x, const double y, const double wx, const double wy, const double ax, const double ay){
    /* squared angular frequencies (2*pi*w)^2 along each axis */
    const double wx2 = (2.0*PI*wx)*(2.0*PI*wx);
    const double wy2 = (2.0*PI*wy)*(2.0*PI*wy);
    const double denom = ax*wx2 + ay*wy2;
    return -(cos(2.0*PI*x*wx)*cos(2.0*PI*y*wy))/denom;
}
// Functions that are for more general purposes, not in use right now
//
//void apply_tri(double* restrict vOut, const double* restrict vIn,
// const double* restrict diag, const double* restrict udiag,
// const double* restrict ldiag, const int n){
// vOut[0] = diag[0] * vIn[0] + udiag[0] * vIn[1];
// vOut[n-1] = diag[n-1] * vIn[n-1] + ldiag[n-2] * vIn[n-2];
// for(int i = 1; i < n-1; i++){
// vOut[i] = ldiag[i-1]*vIn[i-1] + diag[i]*vIn[i] + udiag[i]*vIn[i+1];
// }
//}
//void solve_tri(double* restrict x, const double* restrict ldiag,
// const double* restrict diag, const double* restrict udiag,
// const int n) {
// // Allocate scratch space.
// double* cprime = (double*) malloc(sizeof(double) * n);
// cprime[0] = udiag[0] / diag[0];
// x[0] = x[0] / diag[0];
// // loop from 1 to N - 1 inclusive
// for (int i = 1; i < n; i++) {
// double m = 1.0 / (diag[i] - ldiag[i-1] * cprime[i - 1]);
// cprime[i] = udiag[i] * m;
// x[i] = (x[i] - ldiag[i-1] * x[i - 1]) * m;
// }
// // loop from N - 2 to 0 inclusive, safely testing loop end condition
// for (int i = n - 1; i-- > 0; )
// x[i] = x[i] - cprime[i] * x[i + 1];
// // free scratch space
// free(cprime);
//}
//void transpose(double* restrict u_t, const double* restrict u, const int n){
// __assume_aligned(u_t, 64);
// __assume_aligned(u, 64);
// for (int i = 0; i < n; i++) {
// for (int j = 0; j < n; j++) {
// u_t[j*n+i] = u[i*n+j];
// }
// }
//}
//double normInf(const double* restrict u, const int n){
// double max = u[0];
// for (int i = 1; i < n*n; i++) {
// max = (max < fabs(u[i]))? fabs(u[i]):max;
// }
// return max;
//}
|
krb5_tgs_fmt_plug.c | /*
* Based on the work by Tim Medin
* Port from his Pythonscript to John by Michael Kramer (SySS GmbH)
*
* This software is
* Copyright (c) 2015 Michael Kramer <michael.kramer@uni-konstanz.de>,
* Copyright (c) 2015 magnum
* Copyright (c) 2016 Fist0urs <eddy.maaalou@gmail.com>
*
* Modified by Fist0urs to improve performances by proceeding known-plain
* attack, based on defined ASN1 structures (then got rid of RC4 rounds
* + hmac-md5)
*
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5tgs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5tgs);
#else
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#include "dyna_salt.h"
#include "md4.h"
#include "hmacmd5.h"
#include "rc4.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#define FORMAT_LABEL "krb5tgs"
#define FORMAT_NAME "Kerberos 5 TGS etype 23"
#define FORMAT_TAG "$krb5tgs$23$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD4 HMAC-MD5 RC4"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_SIZE sizeof(struct custom_salt *)
#define SALT_ALIGN sizeof(struct custom_salt *)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/*
assuming checksum == edata1
formats are:
$krb5tgs$23$checksum$edata2
$krb5tgs$23$*user*realm*spn*$checksum$edata2
*/
static struct fmt_tests tests[] = {
{"$krb5tgs$23$74809c4c83c3c8279c6058d2f206ec2f$78b4bbd4d229487d5afc9a6050d4144ce10e9245cdfc0df542879814ce740cebb970ee820677041596d7e55836a18cc95c04169e7c74a4a22ae94e66f3d37150e26cc9cb99e189ef54feb7a40a8db2cb2c41db80d8927c74da7b33b52c58742d2109036b8ab27184609e7adff27b8f17b2f2a7b7d85e4ad532d8a70d48685a4390a9fc7a0ab47fd17334534d795abf83462f0db3de931c6a2d5988ab5bf3253facfff1381afb192ce385511c9052f2915ffdb7ea28a1bbad0573d9071e79dc15068527d50100de8813793a15c292f145fa3797ba86f373a4f0a05e5f2ec7dbfd8c8b5139cc7fbb098ea1dd91a7440134ffe2aff7174d0df13dcad82c81c680a70127a3ec8792bdecd74a878f97ff2b21277dc8c9a2f7bbcd9f72560dd933d85585259067d45a46a6f505d03f188b62c37edf03f117503a26743ebd674d5b07324c15fc8418881613b365402e0176da97d43cf85e8239b69aee07791233a959bcaf83a7f492fa718dd0a1747eaf5ce626eb11bda89e8235a056e2721f45c3b61442d893ef32a8c192ea0dadb853f3c6f3c75e92f23c744605c6f55578f696b0f33a9586b8aae3e12e38a097692cd9a31d780d973eaaf62ef23b2fc9ae59a38bfd8ea14d3289b46910f61a90aa733e66382bc27f40ba634e55ef1bec0ca7f71546b79566d85664b92f9fae495fcef5cde4c4399a6798569a7e81b9cc4bdde7104f3fe181401f82bba944e3b0a406c7093c00ff9d5984a82517b1a64a8aa561bc1f0cbafbdbbc5654d375c91d4e485e17bb06838109fbc1504147481c91652f545086a84daa423a6286ea6bb13460c5ff3d865a7b37b9ce4e7b07fbe2f6897c12c1e4df2e875c1ec9cfbf84097a7f48b270baf3481263b21849ab93c231490d06a23461a5e00c23df76bca8e5a19256d859304e1f5752bf055ac7f4843e1ad174f1cbbf5c142958f9310025ce439d5979982fb0b8c2ea95e1a22ee8dc63423d9d364cb0b95bcdf89ec4ed485b9005326d728757d77aa3e020a4a61d7deb782bc5264dca350173609772cd6d003ee8104dd24d310c9a18a44f78e27d65095f5bb54f6118c8f0d79ad5a850cec8d40a19bd0134144e904c9eb7fdcff3293696071fc1118f6b2f934281a25bcd5ca7d567714b1e43bd6d09bfcc8744c0ca273a75938394ac2fb31957287093346078577c94a71dfa6ad4a63211f54f00ef7a9064d070aaff84116ee891728915c938a8a32e87aaa00ec18e2b4e9ae88f7e53f08d855052a995f92351be32d8df934eab487103b0f089828e5fb5f73af3a8a05b9fffd25c43a392994743de3de1a2a9b8bba27e02ae2341f09d63aafab291759c41b9635521ca02f08e21e7e5c3ce75c8c3515ea
a99aeb9bf8e204663e8b6b8507ecf87230a131687b4770e250ba0ef29fa3ca3b47b34971e17c6a3ef785acdd7c90da9d2", "test123"},
{"$krb5tgs$23$ee09e292a05e3af870417758b1cdfd04$a1a480b8505d2f2f0ff06ddce40c2f6e76bd06fa64dcd5b0646a68effcd686b2e41562ebda90da7e7b36d95cd16ca8a33b8d99184d6b7fa7a2efec3a05dcb63b3e815ffd38849dc69174d1efb3a871544b73a6da55d2331bd4b60743d1654873e3c1748ce155c35a1711695296ab944d158e374b67f43dd07eab2bcacec1be480e5c1338e3834f7133909f5c7970ece39e73bd96d40f696cb5a8575e5e1feab937b616d6180cc3258e22b9fc495017593e15fc10e674af8184c282a0d80902ea9dabda5fb0a56d7980bfd4b62b330155cd8e318dc5be55500cb8ddd691b629af371463c411f1c11d21811e1546477b85f0a85e296f5df737930aff5015111d2f01a236ab7c77e9dab001f52400cccbcdb31bb180db027bd0fa2f6000dce7c1e072c0effbdee23a401720b1fe54a09a75430497f42f6e047d62d1123866d6ed37e58f8e2c1e462acb1a97a44a5ccef49897af190a46b3ab057d18c1e47d717c7a63658357d58d9cd5b7672f0a946f95f6e2ec3aee549e20e3b11237ea59f87723f24e03a6fac9e51086bc84142631ed36ee6855920f3d3d1e85d0faaa0a8b04a2b050b17f94d44af7f48302fa70dcf43279415983924e5d874c59722b6fb87ad1006fcb51e4341bb2cc4caf8c4b7993269af219cf4efa12b1009961c22f123c35f982e4ca75a97cd37f7f16be111ad301637ffb1664ccb021d3cf6bf771e07dc42202dac079c6bd7559f8e7a939bc14e9ddb45fe1b88c5f83b1ff966342bb9211afd15772cf5f871d39d0b30776d51d84b046df30d250c1877d146047e784c4bc2e6745f357dd0b1c6aaa11e26a0e3c2772781695f6a3bc536ba19e2327ec8c0866bd78d3b5b067abcf6991eafc8b7a11ad4049711263f3c68b358f246da1308d5a0daac1d7efedbc237be3d6a4bafe5ce66e941f7227d2b869bda637dfd223a4546340c59e7d0e2b58f60a67590468a53a5d28cc35cec36a9c5610c70c0633767539640b42cff787f4782057ff70d0e64658413429347f5449c1360da4d0827c4197bbb0361c6d0e04bcaf6bba1233912f806772146c0e778ac06749bbd3d8819007d070ae912580ff11a41f71b0907b88fb585585ebe42b4cc4ecde8ff7b49a856dd70f316425e53feff3ee6ca1e44d9ba5e607a41cf26edf44bffe2796f94ea2d767fbf81f665a7fedf0291e76c6fa409dc99c56954f21edc77f6173c5a3a909c8756f3cc5cc6c2d2e405f333ee0b50284aacfb81f9dfc6058b78b282db4511580eb623dc393919befc250d224490936e5fb16c483f4bd00c8915288d0ddf3812eaa3d46ad5a24c56390076730d23b2de6558ddadddba725f9b4a73d13de3e1276fc285194e3a2f613d9b020d0485d7
e26b36b7b917f4911024127320627066fabbd465b4cd5d5fdebae804d15db0f5b276659364bec32a13a8d9e11349f54bd", "bluvshop2"},
{"$krb5tgs$23$*Fist0urs$MYDOMAIN$cifs/panda.com*$423cb47a258e5859c13381ae64de7464$8dd47d94e288a1b32af726d2eac33710fb1610e4c6f674907d7a74d26515a314173b2b531baa790b70467ebe538fc9e941bf4d7f7218a4ec17c1dc963b717d5837fcd5ae678189101a1b4831a53a1322ca6e8f5d644e4aa72e99bedb4a0e967c3e05ccdcc96137265612969a1214a71038dea845250cac45551963fe85f193d88aa39ed57b95b934295e17de04ebf0ad275df67f65fb1fc2ee3095c6af02c4c1b8efa570e1c2ac562601c5ac89bd6f59ca8b957660aa00787d4a0f9d9f29b15eb3b85823f7c9814eab9106210c37d863cf8413391c5941a994fdd52a44e4f8e8e4c9b8b520e62015fb5ed40e91e7a453b3ddcefb888fd896c187993a899b6a30d27a5b2b7847a410c0cce8b0fcf90367bfd8e6dfa7eb37676ecdf500c9a51ffb59792c13e222371e024f857134b7039931daa66a6902da37e71c41adf83846a9df1e75575696d7a6f1744d48e8215849773903c9475c29a1ec0fcc11257f9479467c2b65679a3da298e6806d619794dfc06b10b5e0a46e395c3ade3d750292f244cabb7172d83dbd42c6e3bd5a93a8c2d5fe84b23a3c60508733f5a087763f2fa514d18f891461b8ea22f7eaa847906182bd0415c28d197c06df8449cc2c6c2016c38672a67613a14ccac9025c4da85fc0825dcd9a1269e6064f80c0de445fbdd237d35ab0eb6ae468413c5b17c9955a8c8c34952c8a188bad7e5b18651a75b1c46cf116422378a94a19c31dfa634c8ab15f4f13e7e427741ab9e8f247b4a8fe2562986ee21f602b4fad45bd535718020b764da6f346e3b028db8a1af88419f3ea9141fcf0c622ed40d894814e5d60a9dcdfc8344f802c7b2f0089131e57ac0cc071af13c3b2b7302e9df4665c48b91f4ef0bb2a60a272e5841e0ee8da01a91773d41f295514b65ccb2190195f720d9838b3e7c701b51e813ef0262fbdbbe06391ba3fe4232e74523dfa933e6d3df2494ddd9f254afdf97623ceb5d32483a870cf72a57617bdbf97f0420c041edb5a884ff401dc21da0472d7a75d89dc9937fd65c3a422063ea44e3954435d38b8f34cec2c0360c8bef392f77fbab76a7b801e05b467d4980d20f0a7dbc1c39f50ce4429df1ec167c6be67d2fbd507a3f7b5d98cf214ae0510fac51e1075a06250d65a3a1179486bda5d982b7904682835079e3042f39a582492cd14dbafb5826e242c81998752043e2dd91b648f115900595f5191a01f187c4b6dea4917e4773a5fb28cb1d20508142a3905068c931a8c9a8fa291b92f8ece9884affd8787a5aa11858274879160e930587f3c32e2cabbd124c708641df09f82d05ab4db157ad24931dc36c616dbb778762ead6a8491ce8a
48037106d382283ac69422c04af3ae2cbe22eff6dff21bc34b154a5fab666870c59aba65bd4e0ea0be3f394bb4901fd64a0e19293b8026188615c84601b7fecdb62b", "jlcirr."},
// 1-40a00000-john@HTTP-adc1.toor.com-TOOR.COM.kirbi file
{"$krb5tgs$23$e43ae991b15bbd11c19395c6c785f4d4$07ea84f4cf5ab2ad5a1a15c5776e7bc024d26451771e653c9cb0b87d8a5d73317f992913621a61039d143818585aee976b5273f53023d28a1da22c8a2f79e47956da4221bd10809fb777b4684cbbc102bda46dc816eb5a5315196f1b2cd47fee6ddc1adae753c96eefe77bf8e8e54e33489595f0c3cb47db9bef77438f666c15de4ee9893839c5280daebd81d476a00944f8282eed61af43578fc6f68dbb47ad9106ea1f58125355506016ccf997d35d8ccad169ba7eebe27e76d19188a227158172b405c7e053da1e3bafae4cd39594e7a03e7a96bdbc63a793fba6c26135d6d1789395f0155341e04f80097540ffb1f299f61960a34db3ea14b95b4633b7eea3a552140e7e42708009fdda3d1b42b3297142bfc036abd3d28f07ba1c8362e1c5b346f55af7214314a92fa412733825f55fe4a56b56859af00eb4f69cc7ad339b7bc8032ff1057be3e73c5533f4f546e599ecbf60305569c9b87b22971ef012ff92f582688b001ad23901dae743c46cae6603f7b6b88db78fcfd59997e8a1078f8a27e28a6628bc59d78674d9d16a6413da369ab58cb702dba01c710fbfed87f4665dfb3cc4a8f83ebf960435ae96973e699cd419324ddf115825c99890b2bb8e35ce0005a2adf95ce691b135358c63aa87088ed615c5a9667927e691bf7135677893abc41c038d25ff9091c14e3d1da85c7f0edaed32c9b3b56d2c473b2363b93aae5cc9b02db47e7a22a639a951e2edce7580f691c2ee0f8ebdfb02cdc6de8d1028e34085d1a69cdebb00a430b5ddce223bd1cc9c66b746a46584c098f051b97180ee8db4268a3a838504884df45227cac6fe9e73704877f04558c9640ac2ed33b3216b2e17805863a955285f4633407097f439d7063faeacdcee0d6d4e6c2adbe85df0e51eb3c08da1cedb4fa70ff74b2711a7294b499597c1f30c1dd3cc12751692311a16e22b3fa6af75eb0ace4170df497ba860445b1fc964771eafc034515918bb080a6d05ab1170708e6ce80bf9b00f808a2b814e89d0ac9b5d1a23686b48e99fdc50c71b5fef8a9bfc851e40bed59f69821109be0119151768e4d91b8b00c46b39af207ad4a2566ce7751ac124c3c5851cd1026052d34988272bf2851bd1a4536816a7635d83c1378b442eb04c15d5028763e0b189c8f45703c54d62aaea570c9e56b0e721d170cda74f91a4101c495fb565bb03f2ad635335c88db112dfb073bb4d1547de3214de5e371bfe9b440de3882f7b83593ca0fc60f4e6e2e3885b2a365a56b529904c74bc58ab38432f0dfbbd3f4d543f9d8685b0aa69aa807701e09e1253b6ed4948c7ceaaafdd0baed2663881d52a163101a5bb697a65b2bfcc54d0dd", "1qaz@WSX"},
// http://www.openwall.com/lists/john-users/2017/02/24/1 (Kerberoasting, TGS-REP)
{"$krb5tgs$23$*mssqlsvc/w2k8-sql.ville.com/65498*$0f5394cf9746bc8ff5b090f89971816d$2e86c9139cb881c784377c14abdbf4058eaeeb19476b0e54dcbd0599c6c349f96513419d80b73de389890ba1f67e94a1f5d9f29aa36d3cfc86c6b047afd57173c723388a1f88ec80e2575dee2a42f5a1c4fc39be69a303ce12328ce6b6e17cb7660312a93774c48ff972fdb29557c201126aebfd5f1d0ca116cc9f5a1b7f7a0847486a988663171441d9e0778ceb160fccd69e194bd6e350cd10f39628414a54629a0e3f170b7bec339ec4ee89db0eb558ad4afd086e0cb90f35b596fc89d81e4f18d75dba84246d2b5e446099f80714d88a251c4e1ce31682d2ec754ac0a2fb0ba56c93c075722318f1152041bc0a0ef558efa1a2b56043df12596f7f0880643787fde2db55a4153c183cda692b23b4ee796488d04bbd10b91e51a5f2d0753902e95534fd73433d1ad268f5caa67e2436e7a722451e8bb07e148928f4c8c416151e440fa99c03543bf21c30e5fd299c31ccb91a058b650aa07c89e84545a84a437bb215e68df7627f90ad1766e6f0ca31858d023376acf1cf06faca36e4054acceae001ab5ac96c8ba4e2a6d285ca837a2b9bbaa9ed4a92a27ae285b67f2198f0461697967826a916d2955707bde3af57e3c71330e3cef292c273ef274379aed1d9117c07c245c0054a3beb4e5dc3a4b960fae326d5f1a7fa517327c514ab4f33b9c2942f15a0b453fa8226ee6bdd2310da2da169724041dd3fce4baff594f37ee6c8d1b62da27d21e7492fe05e7f2c9e4a0d3a9540b9a50c1fa697c6311f2af31b6e743f01f499c2ece315ad74c861f379276b8d8a50c41eea5cb0b2eac7f011d759b09d2cf0aecae519ea5a25fac4cc726ae56ce76136049256e7375fdb2e3ee60f408e28657872514b63dcff78bf2840e71d9d318620409f2ba171a6cb8f05b56fa1f0c39fd91284faa497df8e1a053160fb48b75f4ddf94c9f67bb6a248cd5008931a5ba5768d51430b0a8f8f43c928d1a693725f4787999322c59fec3563fdc9bdae5981f1398a843bf4258433d4f79ad5fea293926de05dcb60668d349650e015ce3e17b1860dea3989bd87f698c5dc9dece7e4733ba069dfa86e8ddc02e13c6de02724dd7d6fa48f25984c1666341a61c4008dd66e6a072a278ed6f009a3a3c0a48946b8d7ce7c22e1009cb6d482a7f3b7105990a1770fae62b2e28281ee5ade79149a8e8a8efc77edfd1308f4ba7f1142f5fa0a73d08ec9a3391cbbfb30c586e001db0fbda98d3fdaf6751180674c8c097aed64ad870568fd4ec55ce9afbdb301954b14115df691213483825286b4c5f86f5bd71d99ae757e4d8c17420b73a4bf37e8584141c5055dc38ca76c16536e85c5b3e88fbac95e62639
1569de6b0d9da0cb0bce65926927fb37f892a059be16e064ec2fff275b976540b017f18553756cf3e6f2fe5a08bf8fd8cca8814ebae6124fc766bcc93eeb375c19e", "Compl3xP4ssw0rd"},
// https://hashcat.net/wiki/doku.php?id=example_hashes
{"$krb5tgs$23$*user$realm$test/spn*$63386d22d359fe42230300d56852c9eb$891ad31d09ab89c6b3b8c5e5de6c06a7f49fd559d7a9a3c32576c8fedf705376cea582ab5938f7fc8bc741acf05c5990741b36ef4311fe3562a41b70a4ec6ecba849905f2385bb3799d92499909658c7287c49160276bca0006c350b0db4fd387adc27c01e9e9ad0c20ed53a7e6356dee2452e35eca2a6a1d1432796fc5c19d068978df74d3d0baf35c77de12456bf1144b6a750d11f55805f5a16ece2975246e2d026dce997fba34ac8757312e9e4e6272de35e20d52fb668c5ed", "hashcat"},
// 1-40a10000-luser@MSSQLSvc~example.net-EXAMPLE.NET.kirbi
{"$krb5tgs$23$70c1576b3fec9b24ddb925efcbdc687c$ac33782f96977412999a6e1010f8b5e099da60c31603280188290bbd336d6a10b029bf5e3eb1218870e27170b704334f4e60b90e5ebcbe7ef102d06785a00c28f337d2995c347493548d854f7208abe4405430e42b6aca8b6d640068d5ab05c2c0176707dbdc096628925937345a9e4f67692773b0df58c36703bb738191681e7424fa85fe964b8a6bc4ef379da8af8513582ccfebf86dd2ec7bd91a702d2eda40d8882aa2042ccf5ca40b7eb370643b003e3909d08a433be7657b5d695ff3abca64191ead8433c2638e08bca64011406a3724aaf70d153a69cc84e8c24b98786ed24a57b4a312346ceef1f30c5a1e437049af054071fdef28747f786207ce7e085dcea3aeee31a7aca11022308fc7db549b0285565710422c9f4dba94090f8ea34113050f75e5e850303e18f29cc6fd8d45a87730bacc9258d179db9b98524f7c1041f7af71ed96f816c7cf73d3d8f8249d9a485fe56dda09f2ca41edda6cf3095f4aff036d9ca71cbed651a1f89bd5607962ff395a3398ef9e4ece1e9ecb59cacd41331c2971ff03f9016875dc03e96a4ade7d318a50af1724a95c8e2441ecd22f041d31f49cc461de3869f930240e02a1f7ff9af331dd48798acc45abf48d9d29caaeda467e4194df14b3dd5678abaf56fd092b9c8a6858d351ed14126e0a7e78970ba462d71a5afc50c544e64c5f708e63f34b6363c0d1921522959a1eda4f46096874b48d88b3fddbbf8984e2a1b836f6bec614806e41aa1b2ac3165942d371c208621ebf2dfe99cfe81367dffea3b3d7a7eaa1eed76d3f3bc9461e884cb3fc747ed344579ba7d803d6e775f80d71fba90535602c016b63d14f50ff8732e6f4f0eb4fdc47b7cf84346f21498d4fc2f3124ce16fcd41caedc5178ccc54cea6298c2e938d887991a84c4dfa71541ca0acb154a1603e24004648e5c81a87a7aababdb48c8cd091cea6cc7aeff2589efd2970cfa9fa073c26ed024fa2864c75058c135d3e1722cf6174169ea69dc346ccf3773ea6598f2597ba7840fc334bc7571c534384f3e4301cc430326c480d16defe7bab77960fdb939208a15445676a488f1cbb02e577cefb51b7bf465e73ded374b3224837d76a4163b8d05cfea83e216dcb6e6441c61f1d90f4c1f592b7538105a63af5843138406173eb3a1df3060312d420c0f360f1054c605019b77098cf8d6684c3c33280e3c15b2ffc776b11e08a417225ec92dbd25d05a8ffbc4662cae0cb14ccb157f36c84baea827cbd14e1d02b3f780d6339128c8ff8513e1a28006f8ffb531d798af6880bbea0044c5fbdbad3a17dfcd028ea334e2e4ac5b50819ac25e6386870105b2a9040324ad014041141898dd40e18f5a2acd7f0c8b8cd9
d58975857f2df9582ba6d5a78ead595bbfae5451f7e6e261209c36c9aaaca4d2f53b0b9fa0afa2cfc5228a027599d816b8ee7", "openwall@123"},
// AD-2012-luser-openwall@123-TGS-SPN.pcap -> extracttgsrepfrompcap.py -> kirbi2john.py, same hash (with better details) is generated by krb2john.py
{"$krb5tgs$23$bdcb2559d28a8857a88102b8c131b861$2a02a3a7d75ecbdc2152588b64c1668a613e0d670cfecb723541096e34a5cf144d151422f3892b5392adffa0f26b03ef6378874e89dc950e395269eb2f114eaf13c61bdd02a2a4af594ab7d1743d7d33d3a9953262f173e61bed92bb097e3225128c9d1531d09cf940aafb9700d9245abb7c2b66479af3fbf9f022eaec9f80b3b7e497b7f25bd9ba796e0a9fb9aae3842d980fa511c0041b956f9f24a120a3a14135b47da44fe3a45c3177a55dc5b1986c1c279b96a5c63869733a61d12d141ca2b969a06ef6cb33597567e01165168a20ee3267ceb32a5553659e9c88d37ca6cce7ebbded80941868369238662f26b10d36e4f2a9426a84fc5abc905b8286cd8bc87bdda9c36a3902222f27e4007fb3d9fb29b7dcff1414584e5d142c35de02e2d3634539ccc787d374686ad67ea4f3aacbd4a418459a8d3d065e4ecdb3b270fafd8d48cd9022a9d9ca00a0d64bcd27046e4bf809f95a94dfcc9700c4c6921e46b1260023ee62c2b87b9873016b92c262385435015e23cfb3f4aa7821c2cabe9b376a1385a32c986dbd5b2466fe20d5358d228810e3b0532bfb6ce690feee3ec0b7ed9f9211ae2a34fe5cc24599d4d28668b869a3fba93b948d57a96da995402e37bf07a8d42cd0bde9b140496e6c53c072d44e28b958659697da4a396607950c5ed54640615ff0857e1148679366206153f5076b4ebf5f7a931335cc3afd0873eba1164400810c5de6cbaf04af5c2ef92ed616d57e14c8eeff1289dc9d53bd94ff0587653526147d1525b66a4012aa2dd67991a86d4b680458de43d22048a1486cc5142b85ebd2e1bbcca572fb155812cf4dc5c4dad7e1e3297185958f623f4e81d657108a2b4721e54fb4ea8806a6b9f0726a5ef2d9eb4a3e2eaeab1632666f20de1d57e58181a5f231a6b2862ca6b7cb33b79e918356b9a85f69de3854463c93150e291727ec6e82cd0a1bc284bc2101bedcd77d1ccf052b736b074e978baae93ec908ba924c445951b66605c05ede1eadc0595f14707aba99bb35ac222d3d34d9b1f59c5edecee434385f4a2a30d2aa8f4c339a23c91b943affd5c6bf51d9deb0116f323f1a3253bdede5e9297b2b202a227f3670073c7d29a6afb21814421d6923f99cb2fe976fd24169c2d97536df04e043e93ef8a81fd0579210d52503ad0cb4a4abae164b3520e9d540eab0d8c1cc88c5afc5bfb253bf0bd02e9c9f61427fa83340c5d1907cf00f6fe27e820544a55c6ba0d0ebeba96c8ad21ceeb0967b2fcea324269dd0b73d5cfdd1e568d9086d7ea93197cda743c7e90b9939fcbf3976a3aa01b4fbea4b849f3aeaf6c1a70ba6df98accbd0545d6abab6d0598e21d450731c68337780171bd4407435488
54e9643c8d1ae51580fab64d314d697628ace7616287d284f53831a3a831e75a0fbf7bfba917e2ade6f7f2fc78b3ed4000652", "openwall@123"},
// From https://blog.xpnsec.com/kerberos-attacks-part-1/ by https://twitter.com/_xpn_
{"$krb5tgs$23$*iis_svc$LAB.LOCAL$HTTP/iis.lab.local*$0f6fc474db169aa8ce9b5e626daacc9d$1a346ce3f66c52976f53831aa24a1b217cdf0d68a0eb87fee00cfd32f544bf83ebb6416732522b12232dd6935eac076b439f56e6cb7fa6c37d984d132e2d2cb65ca399cd5e44eb2eb41f12c40f9044b40e3ea914278c8a3098babacf49ab46e776d1413ef63abcdf6418d2db9241b2fdd9309346ec59af20a82fd6daea9510c1dfd1a9e8d99c59ff72e985057ba0d18394b0a7cb1bd74f8d436a3dd780175a0c6bcad9e46570a476ab9913b561ee481ad8c33a3c81ced055e959f08a52eba7a342f53183e1531be8ec2d28c7ecfa32f98dbc7ff87b4e5c79824f3868d38ce09010960726d58cfbfc88c9d34ab199169f39010aa4aab92b6ea40f875963d518311b3f079d97b65fe9768c9a4ee50f7c16d525fdc081ce359a0b0fe5fb18d8d8690d8f88b010bef4f28dc151a4137272ae9eaca9053406c0ddeae453196e3b6c28b8359724bfc089b772cbae093bf88abc070d12b0ff2e721d7b8b10b822bfb514091effaf3f5fa8c286a9e45bf76ba171e6cabeb3ddadc297185c51a295855b8cfa8062bd6770093355c32690fd184d6eae2b66ea1f553cbc7679681db5089fdb23329efe59de807e657a98ccc0c2d95eaac9f363d5b8c9b8a23aab680c328b019ae99440a5d8795014be22f6739a4f77874e94196f010c012f9a4a587570c38874ad7f8b9ec554fb865752a5f3dd4f785c9af54031100ce580dfadf4c70ff11839647fc288fce8d00bbcb680e02a46230ecb0530ba1771fb8485ba17f5218852c5cdfc769b89d77b37802cc6d22e6ba944f6e4b565d8d04418c44bf10e06294fd58913ca6d206bb6e46f15b3abfc09695f5fbab81d2e743ac19b24716d9d6cb6bae65674f5cdf1935d1413a4be6d96eafaf65cfa361decc0ab1e12998b5c26b6ad38c8077fd149cdeda227c4c68f19fbf22b23e7e84581a64a413c1c983e01b56c2000656b4aad8c67260fc0142eeccd96d624fa284b619d11e797af2d730a5998d9e6d9f4fef58a847d7d9b804be2925beae627a0a9f335072f97f214a24db58cf5e2e74f0eeff1a43f1ec1b88c0110f3c2abaac0d3e954a42b550c37cf84babe6e85ec4e0885eb8309a4c5e2a1bb473b332ff5c31c0b4c32db507c1eca5b7ae607d2423ee1e7f07361229e0ab2678cfbd07afffc5e989c5ab1821ac2f524083258d3f0ca7e7f8250be3f7cc72cf636b098a3c9b3f4e289fd81a9b3c33bfa63ed8813bbc12205134add9fb8548312b734c921a2cf8a1687af7ee022b0f57bbf0f8d8f17952614cb288b95df3fe4f03d20b83227328603dafb264537eb0cacda18de21aa99e07600030424edb41fc3c8161238971bf62af99db8e2d438
af06f9d8feeff3edb6a4d4f0a6fb5dfdbe99b1ed454d6ff3dc508c45ed430923212a088e6200b2076da509888edd32fca946a215c8934db7a3b5ac6bed10e4a114f2f132608dbe236cba73cbcffc024fb500e96c3d766ca7f4083ded3666c2b7dcd290f65f7e80ff70fa575777a845fbf7af05b38dfb1ccd7accc0398f8dbf532e28dc6bc0ec49d18f2753caec5912693a0b6050f2bfce72f5160847dcfc78d580609007ddbdf1f338c61c13e7b62fcec6e51d1c0cd1ec0167e40042", "Passw0rd"},
// ../run/ccache2john.py krb5cc_1000_linux-client-to-AD-server
{"$krb5tgs$23$8bef2ad33c7d5d23c8693ccd868d9f84$f72688ca744f9634625c2dbd94ef6193644e08c4627839ce5fcf8bbfbd15ad7093fb3ecc24512ac161188a7ad2de127166f4f244f37527e15468d844efe6396756b8cf8341cbfe70e454e1f2b324f5ddee64f4f4947e1bb6776041e28e6261b57b1791677b35e1b4171ec5a05062b7bd7eb5558d6e4165ad916e790954f163a7f7e8390462295d74eff33fcbb442d4281f7cf68ddbb6ce27030015b2cf4f68e04793a9674e634663a68622e13a5a6d8c27bf8b90ee18033e754c95776d1dbd8d73ab292d27c445eb33b9b814945617f527678667b2d81adc57c7c62713d7826bc9ea918b330245a4d7fbcc33889901a63131be87a0d4e8f49d1259141677bfa27c284d8cfb1c94f4d4a602f30b774e58378710d0ad2a73c978c8c6470402c0cdbdf5d505fe07dc4250510836d5ec969b56790128aa75b292d71d4e6c77ffee261230fc7b11969e4701abc63f120420cc0f74f99e42d6613713fc0530842f77b7a45daadb296c744e0725cd2867492b5bc1cb6a4e3315e1f6b7e5b0299a9283235769d95ef63c885352b08176547f86a03ec323a2e5c84f66f3ab699fc7c5e8b2120659c963d5c98df9529939b250e0f1703f5a9859f8b00a5317aa92dc37093dc87984c0eb841cd2ce77b80510c79e8b5eb56c15f07eba936d257a4d78f82a6179ba7ed07fbeba68d43112e44f850a5034b9239c689828f7039b8c8efcbecbc34ab92d5fb869482456a92ce9201e6586458e1d7bd38b2251b1fb573dc1c6ce40a428448cfaf18e5251436aece6baa2356a06a50a6d4c011977233e7b38007cad4a869941fea1139ff4d2ce216166abf5dfa89dc85719b14eaa0d492b1d442b67cb10c81bb35e1a96ae4f62e8c4fbf604c916e2ac4a121620a2beb40cf7d43bbe7ddbb38435dacd7aba846915eb01b57d33d5dcc25082e79452416d86143e33608a8e279d396cca4a8cc5d02ad820868c4381f9f7f5bab23ba8440fd9ab5ae713cb1c20a82e0a0e93a83248b2a2a8a30aceed71a8e78526943b45f122f16ea004ffdea16005bb52e184b7498c8aefb26da2e8b43e98343e9bb364578deceee623656985fdb58dda60a112d9dc1f0e36230d43ab8d1f48ba2013fd641001f675b8918fb69826decefd742a3f3c02b49988b2b439db763db2d95744a2f2456f5b32ea64e8836873ecce085875b4e054e55700198642961560529bba800a683f149c63dd5f983477f415399f229f6f292327ebac73a6707da022e472f1081421c8549d21696db7fe7bdd2ad744a5afbada9a719c26e3962cd86f8e", "Passw0rd"},
{NULL}
};
/* Per-candidate buffers; allocated in init(), sized by max_keys_per_crypt. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];	/* plaintext candidates */
static unsigned char (*saved_K1)[16];	/* cached K1 = HMAC-MD5(NT-hash, msg-type) per candidate */
static int any_cracked, *cracked;	/* global hit flag + per-index hit flags */
static size_t cracked_size;	/* byte size of cracked[] for fast memset */
static int new_keys;	/* set by set_key(); tells crypt_all() to re-derive K1 */

/* Salt parsed from one "$krb5tgs$23$..." ciphertext (dyna_salt-managed). */
static struct custom_salt {
	dyna_salt dsalt;	/* must be first: dyna_salt comparison header */
	unsigned char edata1[16];	/* checksum field (HMAC-MD5 over decrypted edata2) */
	uint32_t edata2len;	/* length in bytes of decoded edata2 */
	unsigned char* edata2;	/* RC4-encrypted ticket enc-part (tiny-alloc'd) */
} *cur_salt;
/*
 * Canonicalize a ciphertext: prepend FORMAT_TAG when missing and
 * lower-case the whole string. Returns a persistent (tiny-alloc'd)
 * canonical copy, or the input itself for already-canonical
 * "$SOURCE_HASH$" entries.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	char *ptr, *keeptr;	/* no need for static storage here */
	size_t i, len;

	if (strstr(ciphertext, "$SOURCE_HASH$"))
		return ciphertext;

	/* Hoist strlen() out of the copy loop (was re-evaluated per char). */
	len = strlen(ciphertext);
	ptr = mem_alloc_tiny(len + FORMAT_TAG_LEN + 1, MEM_ALIGN_NONE);
	keeptr = ptr;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) {
		memcpy(ptr, FORMAT_TAG, FORMAT_TAG_LEN);
		ptr += FORMAT_TAG_LEN;
	}
	/* Copy including the terminating NUL, lower-casing as we go. */
	for (i = 0; i <= len; i++)
		ptr[i] = tolower(ARCH_INDEX(ciphertext[i]));

	return keeptr;
}
/*
 * Validate a candidate ciphertext. Accepted shapes (see header comment):
 *   $krb5tgs$23$checksum$edata2
 *   $krb5tgs$23$*user*realm*spn*$checksum$edata2
 * Returns 1 when parseable, 0 otherwise. Works on a strdup'd copy since
 * strtokm() mutates its input.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;	/* original strdup pointer, kept for MEM_FREE */
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if (ctcopy[0] == '*') { /* assume account's info provided */
		ctcopy++;
		/* skip the "*user*realm*spn*" block in one token */
		p = strtokm(ctcopy, "*");
		ctcopy = strtokm(NULL, "");
		if (!ctcopy || *ctcopy != '$')
			goto err;
		++ctcopy; /* set after '$' */
		goto edata;
	}
	if (ctcopy[0] == '$')
		ctcopy++;
edata:
	/* assume checksum: exactly 32 hex chars (16 bytes) */
	if (((p = strtokm(ctcopy, "$")) == NULL) || strlen(p) != 32)
		goto err;
	/* assume edata2 following */
	if (((p = strtokm(NULL, "$")) == NULL))
		goto err;
	/* edata2 must be pure hex and at least 64+16 chars (nonce + ASN.1 head) */
	if (!ishex(p) || (hexlen(p, &extra) < (64 + 16) || extra))
		goto err;
	/* no trailing fields allowed */
	if ((strtokm(NULL, "$") != NULL))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * One-time format setup: scale the key-per-crypt tunables for OpenMP and
 * allocate the per-candidate buffers used by set_key()/crypt_all().
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_alloc_align(sizeof(*saved_key) *
	                            self->params.max_keys_per_crypt,
	                            MEM_ALIGN_CACHE);
	saved_K1 = mem_alloc_align(sizeof(*saved_K1) *
	                           self->params.max_keys_per_crypt,
	                           MEM_ALIGN_CACHE);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(cracked_size, 1);
}
/* Release every buffer allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(saved_K1);
	MEM_FREE(cracked);
}
/*
 * Parse a ciphertext into a struct custom_salt. Decodes the 32-hex-char
 * checksum into edata1[] and the variable-length hex edata2 into a
 * tiny-alloc'd byte buffer. Returns the address of a static pointer to a
 * persistent copy, as required by the dyna_salt machinery (SALT_SIZE is
 * sizeof(struct custom_salt *)).
 */
static void *get_salt(char *ciphertext)
{
	int i;
	static struct custom_salt cs;
	char *p;
	char *ctcopy;
	char *keeptr;	/* original strdup pointer, kept for MEM_FREE */
	static void *ptr;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	memset(&cs, 0, sizeof(cs));
	cs.edata2 = NULL;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) == 0) {
		ctcopy += FORMAT_TAG_LEN;
		if (ctcopy[0] == '*') {
			ctcopy++;
			/* skip "*user*realm*spn*" then the "*$" that follows it */
			p = strtokm(ctcopy, "*");
			ctcopy += strlen(p) + 2;
			goto edata;
		}
		if (ctcopy[0]=='$')
			ctcopy++;
	}
edata:
	if (((p = strtokm(ctcopy, "$")) != NULL) && strlen(p) == 32) { /* assume checksum */
		/* decode 32 hex chars into the 16-byte checksum */
		for (i = 0; i < 16; i++) {
			cs.edata1[i] =
			    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			    atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		/* skip '$' */
		p += strlen(p) + 1;
		/* retrieve non-constant length of edata2 */
		for (i = 0; p[i] != '\0'; i++)
			;
		cs.edata2len = i/2;
		cs.edata2 = (unsigned char*) mem_calloc_tiny(cs.edata2len + 1, sizeof(char));
		for (i = 0; i < cs.edata2len; i++) { /* assume edata2 */
			cs.edata2[i] =
			    atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			    atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
	}
	MEM_FREE(keeptr);
	/* following is used to fool dyna_salt stuff */
	cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, edata1);
	cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, edata1, edata2len, 0);
	cs.dsalt.salt_alloc_needs_free = 0;
	ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	memcpy(ptr, &cs, sizeof(struct custom_salt));
	return (void *) &ptr;
}
/* Install the active salt; 'salt' points at a struct custom_salt pointer. */
static void set_salt(void *salt)
{
	struct custom_salt **indirect = (struct custom_salt **)salt;

	cur_salt = *indirect;
}
/*
 * Store one plaintext candidate and flag that K1 caches must be rebuilt.
 * Bound the copy by the destination buffer size, not by strlen(key)+1:
 * the old bound made strnzcpy's truncation a no-op, so an over-long key
 * could overflow saved_key[index] (PLAINTEXT_LENGTH + 1 bytes).
 */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
	new_keys = 1;
}
/* Return the candidate previously stored by set_key(). */
static char *get_key(int index)
{
	return &saved_key[index][0];
}
/*
 * For each candidate: derive K1 = HMAC-MD5(MD4(UTF-16LE(password)), msg-type)
 * (cached across salts via saved_K1 / new_keys), then K3 = HMAC-MD5(K1,
 * checksum), RC4-decrypt the first 32 bytes of edata2 and look for the
 * expected ASN.1 plaintext; only on a match decrypt the rest and verify
 * the full HMAC-MD5 checksum. Marks hits in cracked[].
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	/* little-endian message-type constant (2) fed to HMAC to derive K1 */
	const unsigned char data[4] = {2, 0, 0, 0};
	int index;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char K3[16];
#ifdef _MSC_VER
		/* MSVC has no VLAs; fixed 64 KiB — assumes edata2len < 65536, TODO confirm */
		unsigned char ddata[65536];
#else
		unsigned char ddata[cur_salt->edata2len + 1];
#endif
		unsigned char checksum[16];
		RC4_KEY rckey;

		if (new_keys) {
			MD4_CTX ctx;
			unsigned char key[16];
			UTF16 wkey[PLAINTEXT_LENGTH + 1];
			int len;

			len = enc_to_utf16(wkey, PLAINTEXT_LENGTH,
			                   (UTF8*)saved_key[index],
			                   strlen(saved_key[index]));
			if (len <= 0) {
				/* negative len marks the invalid byte offset: truncate there */
				saved_key[index][-len] = 0;
				len = strlen16(wkey);
			}
			/* NT hash: MD4 over the UTF-16LE password */
			MD4_Init(&ctx);
			MD4_Update(&ctx, (char*)wkey, 2 * len);
			MD4_Final(key, &ctx);
			hmac_md5(key, data, 4, saved_K1[index]);
		}
		hmac_md5(saved_K1[index], cur_salt->edata1, 16, K3);
		RC4_set_key(&rckey, 16, K3);
		/* decrypt only the first 32 bytes for the cheap known-plaintext test */
		RC4(&rckey, 32, cur_salt->edata2, ddata);
		/*
		8 first bytes are nonce, then ASN1 structures
		(DER encoding: type-length-data)
		if length >= 128 bytes:
			length is on 2 bytes and type is
			\x63\x82 (encode_krb5_enc_tkt_part)
			and data is an ASN1 sequence \x30\x82
		else:
			length is on 1 byte and type is \x63\x81
			and data is an ASN1 sequence \x30\x81
		next headers follow the same ASN1 "type-length-data" scheme
		*/
		if (((!memcmp(ddata + 8, "\x63\x82", 2)) && (!memcmp(ddata + 16, "\xA0\x07\x03\x05", 4)))
			||
			((!memcmp(ddata + 8, "\x63\x81", 2)) && (!memcmp(ddata + 16, "\x03\x05\x00", 3)))) {
			/* check the checksum to be sure */
			RC4(&rckey, cur_salt->edata2len - 32, cur_salt->edata2 + 32, ddata + 32);
			hmac_md5(saved_K1[index], ddata, cur_salt->edata2len, checksum);
			if (!memcmp(checksum, cur_salt->edata1, 16)) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
	}
	new_keys = 0;
	return *pcount;
}
/* Any hit in this crypt_all() batch? (binary is unused; BINARY_SIZE is 0) */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Did candidate 'index' crack? Full verification already done in crypt_all(). */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Same as cmp_one: the checksum match in crypt_all() is already exact. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/*
 * Format registration: parameters block first, then the method table.
 * Order is positional and must match struct fmt_main in formats.h.
 */
struct fmt_main fmt_krb5tgs = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,	/* minimum plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{NULL},	/* no tunable cost names */
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		fmt_default_binary,	/* no binary: verification lives in crypt_all() */
		get_salt,
		{NULL},	/* no tunable cost value functions */
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_dyna_salt_hash,
		NULL,	/* salt_compare handled by dyna_salt */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright @ 2007 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
/*
 * Opaque matrix cache. Elements live either in heap/anonymous-mapped
 * memory (MemoryCache) or in a disk-backed file (DiskCache); see
 * AcquireMatrixInfo() for the selection logic.
 */
struct _MatrixInfo
{
  CacheType
    type;               /* MemoryCache or DiskCache */

  size_t
    columns,            /* matrix width in elements */
    rows,               /* matrix height in elements */
    stride;             /* bytes per element */

  MagickSizeType
    length;             /* total cache size: columns*rows*stride bytes */

  MagickBooleanType
    mapped,             /* MagickTrue when elements came from MapBlob() */
    synchronize;        /* honor MAGICK_SYNCHRONIZE (posix_fallocate) */

  char
    path[MagickPathExtent];   /* backing file path for DiskCache */

  int
    file;               /* backing file descriptor, -1 when none */

  void
    *elements;          /* element storage for MemoryCache, else NULL */

  SemaphoreInfo
    *semaphore;         /* serializes lseek+read/write when pwrite is absent */

  size_t
    signature;          /* MagickCoreSignature validity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/*
 * SIGBUS handler installed by SetMatrixExtent(): a bus error here means a
 * mapped disk cache could not be extended (e.g. disk full), which is fatal.
 */
static void MatrixSignalHandler(int status)
{
  magick_unreferenced(status);
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
 * Write 'length' bytes from 'buffer' to the matrix backing file starting
 * at 'offset'. Uses pwrite() when available; otherwise serializes an
 * lseek()+write() pair under the matrix semaphore. Short writes are
 * retried, EINTR is retried, other errors abort the loop. Returns the
 * number of bytes written (may be < length on error), or -1 if the
 * initial lseek() fails.
 */
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* retry only when interrupted; any other failure ends the loop */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
 * Ensure the disk-backed cache file is at least 'length' bytes by writing
 * a single NUL byte at offset length-1 (sparse extension). Optionally
 * preallocates real blocks via posix_fallocate() when MAGICK_SYNCHRONIZE
 * is set, and installs the SIGBUS handler so a failed mapped-file
 * extension dies with a clear message. Returns MagickTrue on success.
 */
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not round-trip through MagickOffsetType */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);   /* already large enough */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  /*
    Allocate and zero-initialize the matrix descriptor.
  */
  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AcquireSemaphoreInfo();
  /* MAGICK_SYNCHRONIZE=true requests synchronized disk allocation */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Total cache size in bytes; re-dividing detects multiplication overflow.
    NOTE(review): the check assumes rows and stride are non-zero — confirm
    callers never pass 0.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    First choice: an in-memory cache (heap allocation, falling back to an
    anonymous memory map).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              /* heap allocation failed; try an anonymous memory map */
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /*
    Fall back to a disk cache, memory-mapping the file when the map
    resource limit permits.
  */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  /*
    Allocate a number_rows x size matrix as an array of row pointers with
    every element initialized to zero.  Returns NULL if any allocation
    fails; all partial allocations are released first.
  */
  double
    **rows;

  ssize_t
    c,
    r;

  rows=(double **) AcquireQuantumMemory(number_rows,sizeof(*rows));
  if (rows == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    rows[r]=(double *) AcquireQuantumMemory(size,sizeof(**rows));
    if (rows[r] == (double *) NULL)
      {
        /* roll back the rows allocated so far */
        while (--r >= 0)
          rows[r]=(double *) RelinquishMagickMemory(rows[r]);
        rows=(double **) RelinquishMagickMemory(rows);
        return((double **) NULL);
      }
    for (c=0; c < (ssize_t) size; c++)
      rows[r][c]=0.0;
  }
  return(rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyMatrixInfo method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  /*
    Release the matrix cache: heap memory, anonymous map, memory-mapped
    file, or disk file, plus the corresponding resource accounting.
    Always returns NULL.
  */
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          /* anonymous memory map rather than heap memory */
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
    }
    /* fallthrough: a map cache is backed by a disk file, so the DiskCache
       teardown below must run as well */
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  RelinquishSemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifying more 'columns' (as an 'array of vector columns'),
% you can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficients, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
/*
  Reduce 'matrix' to reduced row echelon form with full pivoting, applying
  the same row operations to the augmented column vectors in 'vectors'.
  Returns MagickFalse if the matrix is singular or an allocation fails.

  Fix: the early singularity/degeneracy returns inside the elimination
  loop previously leaked the 'columns', 'rows', and 'pivots' work arrays;
  every exit now funnels through a single cleanup path.
*/
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  MagickBooleanType
    status;

  ssize_t
    column,
    *columns,
    i,
    j,
    k,
    *pivots,
    row,
    *rows;

  /*
    Acquire the bookkeeping arrays for full pivoting.
  */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  status=MagickTrue;
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Locate the largest remaining element as the next pivot.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    status=MagickFalse;  /* degenerate: column pivoted twice */
                    goto cleanup;
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /*
          Swap the pivot row into place, in the augmented vectors too.
        */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        status=MagickFalse;  /* singularity */
        goto cleanup;
      }
    /*
      Normalize the pivot row, then eliminate the pivot column from every
      other row (and from the augmented vectors).
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /*
    Undo the column permutations introduced by full pivoting.
  */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
cleanup:
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  GetMatrixColumns() returns the number of columns in the matrix.  The NULL
  cast in the assert now matches the const-qualified parameter, consistent
  with the sibling GetMatrixRows().
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/*
  Clamp the x-offset into the valid column range [0, columns-1],
  replicating the edge element for out-of-range requests.
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x < (ssize_t) columns)
    return(x);
  return((ssize_t) (columns-1));
}
/*
  Clamp the y-offset into the valid row range [0, rows-1], replicating the
  edge element for out-of-range requests.
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y < (ssize_t) rows)
    return(y);
  return((ssize_t) (rows-1));
}
/*
  ReadMatrixElements() reads `length' bytes of the matrix disk cache
  starting at byte offset `offset' into `buffer'.  Returns the number of
  bytes actually read, or -1 on a seek failure.  Interrupted reads (EINTR)
  are retried; any other failure (or EOF) ends the loop early, so callers
  compare the return value against the requested length.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* no pread(): serialize lseek()+read() against other threads */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* retry on EINTR; abort on any other error or EOF */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* out-of-range offsets are clamped to the nearest edge element */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed cache: copy stride bytes directly */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  /* disk cache: read the element from the backing file */
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  /* simple accessor: number of rows in the matrix */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the LeastSquaresAddTerms method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that form the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  /*
    Accumulate one sample into the least-squares normal equations:
      matrix     += terms x terms'   (rank x rank outer product)
      vectors[v] += results[v]*terms (one accumulation row per result)
  */
  ssize_t
    u,
    v;

  for (u=0; u < (ssize_t) rank; u++)
  {
    const double
      term=terms[u];

    for (v=0; v < (ssize_t) rank; v++)
      matrix[v][u]+=terms[v]*term;
    for (v=0; v < (ssize_t) number_vectors; v++)
      vectors[v][u]+=results[v]*term;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* elements must be at least double-sized for the scan below */
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&min_value);
  max_value=min_value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      double
        value;

      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /* choose a scale that maps [min,max] onto the quantum range */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        /* constant non-zero matrix: map the constant to QuantumRange */
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
    NOTE(review): the AcquireImage() result is used without a NULL check —
    confirm it cannot return NULL in this configuration.
  */
  image=AcquireImage((ImageInfo *) NULL,exception);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    Quantum
      *q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      /* scale the element into the quantum range and clamp */
      value=scale_factor*(value-min_value);
      *q=ClampToQuantum(value);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      /* in-memory or mapped cache: zero in one shot */
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk cache: zero the backing file one byte at a time.
    NOTE(review): the inner loop bound is matrix_info->length (the TOTAL
    byte count), not the bytes in a single row, so the file is zeroed rows
    times over; presumably the bound was meant to be columns*stride —
    confirm against upstream before changing.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < (ssize_t) matrix_info->length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    /* a short write above aborts the outer loop as a failure */
    if (x < (ssize_t) matrix_info->length)
      break;
  }
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  /*
    Free a matrix created by AcquireMagickMatrix(): release each row, then
    the row-pointer array itself.  A NULL matrix is a no-op; the return
    value is always NULL, convenient for assigning back to the caller's
    pointer.
  */
  ssize_t
    r;

  if (matrix == (double **) NULL)
    return(matrix);
  for (r=0; r < (ssize_t) number_rows; r++)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* unlike GetMatrixElement(), offsets are bounds-checked, not clamped */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed cache: copy stride bytes directly */
      (void) memcpy((unsigned char *) matrix_info->elements+i*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  /* disk cache: persist the element to the backing file */
  count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
|
quick_sort_omp.h | // g++ -std=c++17 -fopenmp test_quick_sort.cpp -o test
// Multithreaded quicksort using openMP
#include <omp.h>
// Exchange the values that a and b point to.
template<typename T>
void swap(T* a, T* b)
{
    const T first = *a;
    *a = *b;
    *b = first;
}
// Lomuto partition using args[hi] as the pivot.  Places the pivot at its
// final sorted index and returns that index; afterwards no element left
// of the returned index exceeds the pivot and none to its right is less
// than the pivot.
template <typename T>
int partition(T* args, int lo, int hi)
{
    const T pivot = args[hi];
    int boundary = lo - 1;  // last index of the <= pivot region
    for (int j = lo; j < hi; ++j)
    {
        if (args[j] <= pivot)
            swap(&args[++boundary], &args[j]);
    }
    swap(&args[boundary + 1], &args[hi]);
    return boundary + 1;
}
// Sort args[lo..hi] in ascending order.  Each partition spawns the two
// recursive sub-sorts as OpenMP tasks and waits for both to finish.
template<typename T>
void quick_sort(T* args, int lo, int hi)
{
    if (lo >= hi)
        return;
    const int p = partition(args, lo, hi);
    #pragma omp task shared(args) firstprivate(lo, p)
    quick_sort(args, lo, p - 1);
    #pragma omp task shared(args) firstprivate(p, hi)
    quick_sort(args, p + 1, hi);
    #pragma omp taskwait
}
|
GB_subref_template.c | //------------------------------------------------------------------------------
// GB_subref_template: C = A(I,J)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// GB_subref_template extracts a submatrix, C = A(I,J). The method is done in
// two phases. Phase 1 just counts the entries in C, and phase 2 constructs
// the pattern and values of C. There are 3 kinds of subref:
//
// symbolic: C(i,j) is the position of A(I(i),J(j)) in the matrix A
// iso: C = A(I,J), extracting the pattern only, not the values
// numeric: C = A(I,J), extracting the pattern and values
#if defined ( GB_SYMBOLIC )
// symbolic method must tolerate zombies
#define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen)
#else
// iso and non-iso numeric methods will not see any zombies
#define GB_Ai(p) GBI (Ai, p, avlen)
#endif
// to iterate across all entries in a bucket:
#define GB_for_each_index_in_bucket(inew,i) \
for (int64_t inew = Mark [i] - 1 ; inew >= 0 ; inew = Inext [inew])
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get A and I
//--------------------------------------------------------------------------
const int64_t *restrict Ai = A->i ;
const int64_t avlen = A->vlen ;
// these values are ignored if Ikind == GB_LIST
int64_t ibegin = Icolon [GxB_BEGIN] ;
int64_t iinc = Icolon [GxB_INC ] ;
int64_t inc = (iinc < 0) ? (-iinc) : iinc ;
#ifdef GB_DEBUG
int64_t iend = Icolon [GxB_END ] ;
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,kC); phase2: compute C
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast < 0) ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
}
// a coarse task accesses all of I for all its vectors
int64_t pI = 0 ;
int64_t pI_end = nI ;
int64_t ilen = nI ;
ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ;
//----------------------------------------------------------------------
// compute all vectors C(:,kfirst:klast) for this task
//----------------------------------------------------------------------
for (int64_t kC = kfirst ; kC <= klast ; kC++)
{
//------------------------------------------------------------------
// get C(:,kC)
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
// phase1 simply counts the # of entries in C(*,kC).
int64_t clen = 0 ;
#else
// This task computes all or part of C(:,kC), which are the entries
// in Ci,Cx [pC:pC_end-1].
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,kC)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task, so this
// task computes all of C(:,kC).
pC = Cp [kC] ;
pC_end = Cp [kC+1] ;
}
int64_t clen = pC_end - pC ;
if (clen == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,kA)
//------------------------------------------------------------------
int64_t pA, pA_end ;
if (fine_task)
{
// a fine task computes a slice of a single vector C(:,kC).
// The task accesses Ai,Ax [pA:pA_end-1], which holds either
// the entire vector A(imin:imax,kA) for method 6, the entire
// dense A(:,kA) for methods 1 and 2, or a slice of the
// A(imin:max,kA) vector for all other methods.
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// a coarse task computes the entire vector C(:,kC). The task
// accesses all of A(imin:imax,kA), for most methods, or all of
// A(:,kA) for methods 1 and 2. The vector A(*,kA) appears in
// Ai,Ax [pA:pA_end-1].
pA = Ap_start [kC] ;
pA_end = Ap_end [kC] ;
}
int64_t alen = pA_end - pA ;
if (alen == 0) continue ;
//------------------------------------------------------------------
// get I
//------------------------------------------------------------------
if (fine_task)
{
// A fine task accesses I [pI:pI_end-1]. For methods 2 and 6,
// pI:pI_end is a subset of the entire 0:nI-1 list. For all
// other methods, pI = 0 and pI_end = nI, and the task can
// access all of I.
pI = TaskList [taskid].pB ;
pI_end = TaskList [taskid].pB_end ;
ilen = pI_end - pI ;
}
//------------------------------------------------------------------
// determine the method to use
//------------------------------------------------------------------
int method ;
if (fine_task)
{
// The method that the fine task uses for its slice of A(*,kA)
// and C(*,kC) has already been determined by GB_subref_slice.
method = (int) (-TaskList [taskid].klast) ;
}
else
{
// determine the method based on A(*,kA) and I
method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI,
(Mark != NULL), need_qsort, iinc, nduplicates) ;
}
//------------------------------------------------------------------
// extract C (:,kC) = A (I,kA): consider all cases
//------------------------------------------------------------------
switch (method)
{
//--------------------------------------------------------------
case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense
//--------------------------------------------------------------
// A (:,kA) has not been sliced
ASSERT (Ikind == GB_ALL) ;
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// copy the entire vector and construct indices
#if defined ( GB_PHASE_1_OF_2 )
clen = ilen ;
#else
for (int64_t k = 0 ; k < ilen ; k++)
{
int64_t inew = k + pI ;
ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ;
ASSERT (inew == GB_Ai (pA + inew)) ;
Ci [pC + k] = inew ;
}
GB_COPY_RANGE (pC, pA + pI, ilen) ;
#endif
break ;
//--------------------------------------------------------------
case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense
//--------------------------------------------------------------
// This method handles any kind of list I, but A(:,kA)
// must be dense. A(:,kA) has not been sliced.
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// scan I and get the entry in A(:,kA) via direct lookup
#if defined ( GB_PHASE_1_OF_2 )
clen = ilen ;
#else
for (int64_t k = 0 ; k < ilen ; k++)
{
// C(inew,kC) = A(i,kA), and it always exists.
int64_t inew = k + pI ;
int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
ASSERT (i == GB_Ai (pA + i)) ;
Ci [pC + k] = inew ;
GB_COPY_ENTRY (pC + k, pA + i) ;
}
#endif
break ;
//--------------------------------------------------------------
case 3 : // the list I has a single index, ibegin
//--------------------------------------------------------------
// binary search in GB_subref_phase0 has already found it.
// This can be any Ikind with nI=1: GB_ALL with A->vlen=1,
// GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0
// (with length 1), or a GB_LIST with ni=1.
// Time: 50x faster
ASSERT (!fine_task) ;
ASSERT (alen == 1) ;
ASSERT (nI == 1) ;
ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = 1 ;
#else
Ci [pC] = 0 ;
GB_COPY_ENTRY (pC, pA) ;
#endif
break ;
//--------------------------------------------------------------
case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA)
//--------------------------------------------------------------
// Time: 1x faster but low speedup on the Mac. Why?
// Probably memory bound since it is just memcpy's.
ASSERT (Ikind == GB_ALL && ibegin == 0) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = alen ;
#else
#if defined ( GB_SYMBOLIC )
if (nzombies == 0)
{
memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
}
else
{
// with zombies
for (int64_t k = 0 ; k < alen ; k++)
{
// symbolic C(:,kC) = A(:,kA) where A has zombies
int64_t i = GB_Ai (pA + k) ;
ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ;
Ci [pC + k] = i ;
}
}
#else
memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
#endif
GB_COPY_RANGE (pC, pA, alen) ;
#endif
break ;
//--------------------------------------------------------------
case 5 : // Ikind is GB_RANGE = ibegin:iend
//--------------------------------------------------------------
// Time: much faster. Good speedup too.
ASSERT (Ikind == GB_RANGE) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = alen ;
#else
for (int64_t k = 0 ; k < alen ; k++)
{
int64_t i = GB_Ai (pA + k) ;
int64_t inew = i - ibegin ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
Ci [pC + k] = inew ;
}
GB_COPY_RANGE (pC, pA, alen) ;
#endif
break ;
//--------------------------------------------------------------
case 6 : // I is short vs nnz (A (:,kA)), use binary search
//--------------------------------------------------------------
// Time: very slow unless I is very short and A(:,kA) is
// very long.
// This case can handle any kind of I, and A(:,kA) of any
// properties. For a fine task, A(:,kA) has not been
// sliced; I has been sliced instead.
// If the I bucket inverse has not been created, this
// method is the only option. Alternatively, if nI =
// length (I) is << nnz (A (:,kA)), then scanning I and
// doing a binary search of A (:,kA) is faster than doing a
// linear-time search of A(:,kA) and a lookup into the I
// bucket inverse.
// The vector of C is constructed in sorted order, so no
// sort is needed.
// A(:,kA) has not been sliced.
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// scan I, in order, and search for the entry in A(:,kA)
for (int64_t k = 0 ; k < ilen ; k++)
{
// C(inew,kC) = A (i,kA), if it exists.
// i = I [inew] ; or from a colon expression
int64_t inew = k + pI ;
int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
bool found ;
int64_t pleft = pA ;
int64_t pright = pA_end - 1 ;
#if defined ( GB_SYMBOLIC )
bool is_zombie ;
GB_BINARY_SEARCH_ZOMBIE (i, Ai, pleft, pright, found,
nzombies, is_zombie) ;
#else
GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ;
#endif
if (found)
{
ASSERT (i == GB_Ai (pleft)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pleft) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
case 7 : // I is ibegin:iinc:iend with iinc > 1
//--------------------------------------------------------------
// Time: 1 thread: C=A(1:2:n,:) is 3x slower
// but has good speedup. About as fast with
// enough threads.
ASSERT (Ikind == GB_STRIDE && iinc > 1) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present; see if it is in ibegin:iinc:iend
int64_t i = GB_Ai (pA + k) ;
ASSERT (ibegin <= i && i <= iend) ;
i = i - ibegin ;
if (i % iinc == 0)
{
// i is in the sequence ibegin:iinc:iend
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
int64_t inew = i / iinc ;
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//----------------------------------------------------------
case 8 : // I = ibegin:(-iinc):iend, with iinc < -1
//----------------------------------------------------------
// Time: 2x slower for iinc = -2 or -8.
// Good speedup though. Faster for
// large values (iinc = -128).
ASSERT (Ikind == GB_STRIDE && iinc < -1) ;
for (int64_t k = alen - 1 ; k >= 0 ; k--)
{
// A(i,kA) present; see if it is in ibegin:iinc:iend
int64_t i = GB_Ai (pA + k) ;
ASSERT (iend <= i && i <= ibegin) ;
i = ibegin - i ;
if (i % inc == 0)
{
// i is in the sequence ibegin:iinc:iend
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
int64_t inew = i / inc ;
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//----------------------------------------------------------
case 9 : // I = ibegin:(-1):iend
//----------------------------------------------------------
// Time: much faster. Good speedup.
ASSERT (Ikind == GB_STRIDE && iinc == -1) ;
#if defined ( GB_PHASE_1_OF_2 )
clen = alen ;
#else
for (int64_t k = alen - 1 ; k >= 0 ; k--)
{
// A(i,kA) is present
int64_t i = GB_Ai (pA + k) ;
int64_t inew = (ibegin - i) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
}
#endif
break ;
//--------------------------------------------------------------
case 10 : // I unsorted, and C needs qsort, duplicates OK
//--------------------------------------------------------------
// Time: with one thread: 2x slower, probably
// because of the qsort. Good speedup however. This used
// if qsort is needed but ndupl == 0. Try a method that
// needs qsort, but no duplicates?
// Case 10 works well when I has many entries and A(:,kA)
// has few entries. C(:,kC) must be sorted after this pass.
ASSERT (Ikind == GB_LIST) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// traverse bucket i for all indices inew where
// i == I [inew] or where i is from a colon expression
GB_for_each_index_in_bucket (inew, i)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
// TODO: skip the sort if C is allowed to be jumbled on
// output. Flag C as jumbled instead.
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
if (!fine_task)
{
// a coarse task owns this entire C(:,kC) vector, so
// the sort can be done now. The sort for vectors
// handled by multiple fine tasks must wait until all
// task are completed, below in the post sort.
pC = Cp [kC] ;
#if defined ( GB_ISO_SUBREF )
// iso numeric subref C=A(I,J)
// just sort the pattern of C(:,kC)
GB_qsort_1 (Ci + pC, clen) ;
#else
// sort the pattern of C(:,kC), and the values
GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
GB_CSIZE2, clen) ;
#endif
}
#endif
break ;
//--------------------------------------------------------------
case 11 : // I not contiguous, with duplicates. No qsort needed
//--------------------------------------------------------------
// Case 11 works well when I has many entries and A(:,kA)
// has few entries. It requires that I be sorted on input,
// so that no sort is required for C(:,kC). It is
// otherwise identical to Case 10.
ASSERT (Ikind == GB_LIST) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// traverse bucket i for all indices inew where
// i == I [inew] or where i is from a colon expression
GB_for_each_index_in_bucket (inew, i)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
case 12 : // I not contiguous, no duplicates. No qsort needed.
//--------------------------------------------------------------
// Identical to Case 11, except GB_for_each_index_in_bucket
// just needs to iterate 0 or 1 times. Works well when I
// has many entries and A(:,kA) has few entries.
ASSERT (Ikind == GB_LIST && nduplicates == 0) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// bucket i has at most one index inew such that
// i == I [inew]
int64_t inew = Mark [i] - 1 ;
if (inew >= 0)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_PHASE_1_OF_2 )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
default: ;
//--------------------------------------------------------------
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = clen ;
}
else
{
Cp [kC] = clen ;
}
#endif
}
}
//--------------------------------------------------------------------------
// phase2: post sort for any vectors handled by fine tasks with method 10
//--------------------------------------------------------------------------
#if defined ( GB_PHASE_2_OF_2 )
{
if (post_sort)
{
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t kC = TaskList [taskid].kfirst ;
bool do_post_sort = (TaskList [taskid].len != 0) ;
if (do_post_sort)
{
// This is the first fine task with method 10 for C(:,kC).
// The vector C(:,kC) must be sorted, since method 10 left
// it with unsorted indices.
int64_t pC = Cp [kC] ;
int64_t clen = Cp [kC+1] - pC ;
#if defined ( GB_ISO_SUBREF )
{
// iso numeric subref C=A(I,J)
// just sort the pattern of C(:,kC)
GB_qsort_1 (Ci + pC, clen) ;
}
#else
{
// sort the pattern of C(:,kC), and the values
GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
GB_CSIZE2, clen) ;
}
#endif
}
}
}
}
#endif
}
#undef GB_Ai
#undef GB_for_each_index_in_bucket
#undef GB_COPY_RANGE
#undef GB_COPY_ENTRY
#undef GB_CSIZE1
#undef GB_CSIZE2
#undef GB_SYMBOLIC
#undef GB_ISO_SUBREF
|
GB_binop__islt_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int64)
// A*D function (colscale): GB (_AxD__islt_int64)
// D*A function (rowscale): GB (_DxB__islt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int64)
// C=scalar+B GB (_bind1st__islt_int64)
// C=scalar+B' GB (_bind1st_tran__islt_int64)
// C=A+scalar GB (_bind2nd__islt_int64)
// C=A'+scalar GB (_bind2nd_tran__islt_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
// C, A, and B types for this operator (all int64_t for ISLT_INT64)
#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (fixed: a stray trailing line-continuation backslash used to splice the
// following comment line into this macro's body)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (fixed: same stray trailing backslash removed here)
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// access the value of C at position p
#define GB_CX(p) Cx [p]

// binary operator: z = (x islt y), producing int64_t 0 or 1
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_INT64 || GxB_NO_ISLT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// This kernel is compiled out (#if 0): ISLT is not in the list above, so no
// dense ewise3-accum kernel is generated and the name is "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Computes C = A (islt) B with C, A, and B all dense.  The loop body is
// supplied by the shared template file, specialized by the GB_* macros
// defined above (GB_CTYPE, GB_BINOP, ...).
void GB (_Cdense_ewise3_noaccum__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C is dense; B has been pre-sliced into B_ntasks tasks (B_ek_slicing) for
// parallel accumulation.  Implementation comes from the subassign template,
// specialized by the GB_* macros above.  Returns GrB_NO_VALUE when this
// operator is disabled at compile time, so the generic kernel is used instead.
GrB_Info GB (_Cdense_accumB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// C is dense; b is a scalar of type int64_t passed via an untyped pointer.
// The accumulation loop comes from the subassign template, specialized by
// the GB_* macros above.  Returns GrB_NO_VALUE when this operator is
// disabled at compile time, so the generic kernel is used instead.

GrB_Info GB (_Cdense_accumb__islt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the int64_t scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point; the duplicate return that used to sit inside the
    // braces above made this one unreachable and has been removed (this now
    // matches the structure of _Cdense_accumB above)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// A has been pre-sliced into A_ntasks tasks (A_ek_slicing).  The numeric
// work is done by the colscale template, writing into Cx (C's values,
// viewed as int64_t).  Returns GrB_NO_VALUE when this operator is disabled
// at compile time, so the generic kernel is used instead.
GrB_Info GB (_AxD__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror image of _AxD above: the numeric work is done by the rowscale
// template, writing into Cx (C's values, viewed as int64_t).  Returns
// GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_DxB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Element-wise "add" with the islt operator, optionally masked (M) or
// complement-masked.  When is_eWiseUnion is true, the alpha/beta scalars are
// unpacked from their untyped pointers and used by the template for entries
// present in only one of A or B.  Workspace declared here is released via
// GB_FREE_WORKSPACE after the template runs.  Returns GrB_NO_VALUE when this
// operator is disabled at compile time.
GrB_Info GB (_AaddB__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
// unpack the typed alpha/beta scalars (only used for eWiseUnion)
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Element-wise multiply (set-intersection) with the islt operator; C is
// sparse or hypersparse.  All work is done by the emult_08 meta template,
// specialized by the GB_* macros above.  Returns GrB_NO_VALUE when this
// operator is disabled at compile time.
GrB_Info GB (_AemultB_08__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for islt (see macro above), so only the non-flipped
// branch below is compiled; the flipxy parameter is effectively unused in
// that branch.  Returns GrB_NO_VALUE when this operator is disabled.
GrB_Info GB (_AemultB_02__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// M has been pre-sliced into M_ntasks tasks (M_ek_slicing); the work is
// done by the emult_04 template.  Returns GrB_NO_VALUE when this operator
// is disabled at compile time.
GrB_Info GB (_AemultB_04__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap variant of the element-wise multiply; the work is done by the
// bitmap emult template.  Returns GrB_NO_VALUE when this operator is
// disabled at compile time.
GrB_Info GB (_AemultB_bitmap__islt_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x < Bx [p]) for each of the bnz entries, in parallel.
// Bb is B's bitmap; positions not present in the bitmap (per GBB) are left
// untouched.  Returns GrB_NO_VALUE when this operator is disabled.

GrB_Info GB (_bind1st__islt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    const int64_t x = (*((int64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // apply the op only to entries present in the bitmap
        if (GBB (Bb, p))
        {
            int64_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = (Ax [p] < y) for each of the anz entries, in parallel.
// Ab is A's bitmap; positions not present in the bitmap (per GBB) are left
// untouched.  Returns GrB_NO_VALUE when this operator is disabled.

GrB_Info GB (_bind2nd__islt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only to entries present in the bitmap
        if (GBB (Ab, p))
        {
            int64_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// Transposes A and applies cij = (x < aij) via the unop transpose template,
// using the GB_CAST_OP macro defined just above.  GB_ATYPE is temporarily
// redefined (and restored at the end) because the template reads it but A
// is the second operand here.
GrB_Info GB (_bind1st_tran__islt_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// Transposes A and applies cij = (aij < y) via the unop transpose template,
// using the GB_CAST_OP macro defined just above.  Returns GrB_NO_VALUE
// when this operator is disabled at compile time.
GrB_Info GB (_bind2nd_tran__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mpm_search_element_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \.
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Bodhinanda Chandra
//
#ifndef KRATOS_MPM_SEARCH_ELEMENT_UTILITY
#define KRATOS_MPM_SEARCH_ELEMENT_UTILITY
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/quadrature_points_utility.h"
#include "particle_mechanics_application_variables.h"
#include "geometries/geometry.h"
#include "includes/model_part.h"
#include "pqmpm_partition_utilities.h"
namespace Kratos
{
namespace MPMSearchElementUtility
{
// Standard types
typedef std::size_t IndexType;
typedef std::size_t SizeType;
typedef Node<3> NodeType;
typedef typename ModelPart::GeometryType GeometryType;
inline double CrossProductDet2D(array_1d<double, 3> VectorA, array_1d<double, 3> VectorB)
{
return (VectorA[0] * VectorB[1] - VectorB[0] * VectorA[1]);
}
// Tests whether the global point Coords lies inside rGeom.
//
// For 2D geometries whose nodes all have z == 0, a fast "walk around" test
// is used: the 2D cross product of (Coords - node_i) with the edge vector
// (node_{i+1} - node_i) must have a consistent sign for every edge.  If any
// nodal z is nonzero, or the geometry is not 2D, the generic
// rGeom.IsInside is used instead.  When the point is inside and
// IsCalcLocalCoords is true, LocalCoords is additionally computed via
// rGeom.IsInside; otherwise LocalCoords is left untouched.
inline bool CheckIsInside(const GeometryType& rGeom, array_1d<double, 3>& LocalCoords, const array_1d<double, 3>& Coords, const double Tolerance, const bool IsCalcLocalCoords = true)
{
    bool is_inside = true;
    if (rGeom.Dimension() == 2)
    {
        // Walk-around method: one cross product per edge
        Vector cross_products(rGeom.PointsNumber());
        for (size_t i = 0; i < rGeom.PointsNumber(); ++i)
        {
            // The fast test assumes a planar geometry in the z=0 plane;
            // fall back to the generic test otherwise.
            // (An unreachable `break` that followed this return has been removed.)
            if (rGeom.Points()[i].Coordinates()[2] != 0.0) {
                return rGeom.IsInside(Coords, LocalCoords, Tolerance);
            }
            cross_products[i] = CrossProductDet2D(Coords - rGeom.Points()[i].Coordinates(),
                rGeom.Points()[(i + 1) % rGeom.PointsNumber()].Coordinates() - rGeom.Points()[i].Coordinates());
        }
        // All cross products must agree in sign with the first one
        // (within the tolerance band)
        for (size_t i = 1; i < cross_products.size(); ++i)
        {
            if (cross_products[i] * cross_products[0] < -std::abs(Tolerance))
            {
                is_inside = false;
                break;
            }
        }
    }
    else return rGeom.IsInside(Coords, LocalCoords, Tolerance);

    if (is_inside) {
        // Optionally compute the local coordinates of the found point
        if (IsCalcLocalCoords) return rGeom.IsInside(Coords, LocalCoords, Tolerance);
        else return true;
    }
    return false;
}
// Builds the list of geometries sharing at least one node with rGeom, by a
// full scan of every element in the background grid, and caches the result
// on rGeom under GEOMETRY_NEIGHBOURS.  O(elements * nodes_per_elem *
// nodes_of_rGeom); intended to run once per geometry (callers check
// rGeom.Has(GEOMETRY_NEIGHBOURS) first).  The final SetValue is guarded by
// an omp critical because this is called from inside parallel loops.
inline void ConstructNeighbourRelations(GeometryType& rGeom, const ModelPart& rBackgroundGridModelPart)
{
std::vector<typename Geometry<Node<3>>::Pointer> geometry_neighbours;
for (IndexType j = 0; j < rBackgroundGridModelPart.NumberOfElements(); j++)
{
auto p_geometry_neighbour = (rBackgroundGridModelPart.ElementsBegin() + j)->pGetGeometry();
if (p_geometry_neighbour->Id() != rGeom.Id()) // dont add the parent as its own neighbour
{
// Shared node => neighbour
for (IndexType n = 0; n < p_geometry_neighbour->size(); n++)
{
for (IndexType k = 0; k < rGeom.size(); k++)
{
if (rGeom[k].Id() == (*p_geometry_neighbour)[n].Id()) {
// Prevent duplicate additions
bool add_entry = true;
for (size_t i = 0; i < geometry_neighbours.size(); i++)
{
if (geometry_neighbours[i]->Id() == p_geometry_neighbour->Id())
{
add_entry = false;
break;
}
}
if (add_entry)
{
geometry_neighbours.push_back(p_geometry_neighbour);
}
// Move on to the next candidate element
break;
}
}
}
}
}
// Cache on the geometry; critical because neighbouring threads may be
// writing values on other geometries concurrently
#pragma omp critical
rGeom.SetValue(GEOMETRY_NEIGHBOURS, geometry_neighbours);
}
// Returns true when the explicit "fix MP on grid edge" option is enabled in
// rProcessInfo and the single-integration-point quadrature geometry has a
// shape function value below machine epsilon (i.e. the material point sits
// on an element edge).  Returns false in every other case.
inline bool IsExplicitAndNeedsCorrection(GeometryType::Pointer pQuadraturePoint, const ProcessInfo& rProcessInfo)
{
    const bool fix_enabled = rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)
        && rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE);
    if (!fix_enabled) return false;
    if (pQuadraturePoint->IntegrationPointsNumber() != 1) return false;

    // Any (near-)zero shape function value means the point is on an edge
    for (size_t i = 0; i < pQuadraturePoint->ShapeFunctionsValues().size2(); ++i)
    {
        if (pQuadraturePoint->ShapeFunctionsValues()(0, i) < std::numeric_limits<double>::epsilon())
            return true;
    }
    return false;
}
// Locates the background-grid geometry containing the point xg.  The parent
// geometry is tested first; on a miss, its cached neighbour list (built on
// demand by ConstructNeighbourRelations) is scanned.  On success IsFound is
// set true and rLocalCoords holds the local coordinates computed by
// CheckIsInside; otherwise IsFound is false and rParentGeom is returned so
// the caller can fall back to a global search.
inline GeometryType& FindGridGeom(GeometryType& rParentGeom,
    const ModelPart& rBackgroundGridModelPart,
    const double Tolerance,
    const array_1d<double, 3>& xg,
    array_1d<double, 3>& rLocalCoords,
    const ProcessInfo& rProcessInfo,
    bool& IsFound)
{
    IsFound = false;

    // Most material points remain inside their parent element
    if (CheckIsInside(rParentGeom, rLocalCoords, xg, Tolerance)) {
        IsFound = true;
        return rParentGeom;
    }

    // Lazily build and cache the neighbour list of the parent geometry
    if (!rParentGeom.Has(GEOMETRY_NEIGHBOURS))
        ConstructNeighbourRelations(rParentGeom, rBackgroundGridModelPart);

    auto& geometry_neighbours = rParentGeom.GetValue(GEOMETRY_NEIGHBOURS);
    for (IndexType k = 0; k < geometry_neighbours.size(); ++k) {
        if (CheckIsInside(*geometry_neighbours[k], rLocalCoords, xg, Tolerance)) {
            IsFound = true;
            return *(geometry_neighbours[k].get());
        }
    }

    // Not found in parent or neighbours
    return rParentGeom;
}
// Recomputes the local coordinates of rCoordinates within the quadrature
// point geometry, then re-partitions the master material point into
// sub-points (PQMPM).  Wrapped in KRATOS_TRY/CATCH for Kratos error
// reporting.
inline void UpdatePartitionedQuadraturePoint(const ModelPart& rBackgroundGridModelPart,
const array_1d<double, 3>& rCoordinates,
Element& rMasterMaterialPoint,
typename GeometryType::Pointer pQuadraturePointGeometry,
const double Tolerance)
{
KRATOS_TRY;
array_1d<double, 3> local_coords;
// NOTE(review): IsInside is called only for its local_coords side effect;
// its boolean result is ignored — presumably the point is already known to
// be inside here, but confirm against the callers.
pQuadraturePointGeometry->IsInside(rCoordinates, local_coords, Tolerance);
PQMPMPartitionUtilities::PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, rCoordinates,
local_coords, rMasterMaterialPoint, pQuadraturePointGeometry, Tolerance);
KRATOS_CATCH("");
}
// For every MPM element, tries to relocate its material point (MP_COORD)
// in the background grid by checking the parent element and its neighbours
// (FindGridGeom).  Found elements get their quadrature point updated
// (PQMPM partition or plain local-coordinate update) and the grid nodes of
// the hosting geometry are set ACTIVE; elements that are not found — or
// that need the explicit edge correction — are collected into
// rMissingElements for the subsequent bin-based global search.
inline void NeighbourSearchElements(const ModelPart& rMPMModelPart,
const ModelPart& rBackgroundGridModelPart,
std::vector<typename Element::Pointer>& rMissingElements,
const double Tolerance)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rMPMModelPart.Elements().size()); ++i) {
auto element_itr = (rMPMModelPart.ElementsBegin() + i);
array_1d<double, 3> local_coordinates;
bool is_found = false;
// xg[0] is the material point's current global position
std::vector<array_1d<double, 3>> xg;
element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rBackgroundGridModelPart.GetProcessInfo());
GeometryType& r_found_geom = FindGridGeom(element_itr->GetGeometry().GetGeometryParent(0),
rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates,
rMPMModelPart.GetProcessInfo(), is_found);
if (is_found)
{
const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM))
? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false;
if (is_pqmpm)
{
// Updates the quadrature point geometry.
(*element_itr).GetGeometry().SetGeometryParent(&r_found_geom);
PQMPMPartitionUtilities::PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, xg[0],
local_coordinates, *element_itr, element_itr->pGetGeometry(), Tolerance);
}
else
{
// Standard MPM: re-anchor the quadrature point in the found geometry
CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
element_itr->pGetGeometry(), local_coordinates,
element_itr->GetGeometry().IntegrationPoints()[0].Weight(), r_found_geom);
}
// Explicit edge case: force a re-search rather than accept a point
// sitting exactly on a grid edge
if (IsExplicitAndNeedsCorrection(element_itr->pGetGeometry(), rBackgroundGridModelPart.GetProcessInfo()))
is_found = false;
else {
for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j)
r_found_geom.Points()[j].Set(ACTIVE);
}
}
if(!is_found)
{
// Shared vector: guard concurrent push_back
#pragma omp critical
rMissingElements.push_back(&*element_itr);
}
}
}
//
// Condition counterpart of NeighbourSearchElements: relocates each BOUNDARY
// condition's material point (MPC_COORD) via FindGridGeom.  Found
// conditions have their geometry replaced by the hosting grid geometry and
// its nodes set ACTIVE; the rest are collected into rMissingConditions for
// the bin-based global search.  Conditions without coordinates or without
// the BOUNDARY flag are skipped.
inline void NeighbourSearchConditions(const ModelPart& rMPMModelPart,
const ModelPart& rBackgroundGridModelPart,
std::vector<typename Condition::Pointer>& rMissingConditions,
const double Tolerance)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rMPMModelPart.Conditions().size()); ++i) {
auto condition_itr = rMPMModelPart.Conditions().begin() + i;
std::vector<array_1d<double, 3>> xg;
condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo());
if (xg.size() > 0 && condition_itr->Is(BOUNDARY))
{
array_1d<double, 3> local_coordinates;
bool is_found = false;
GeometryType& r_found_geom = FindGridGeom(condition_itr->GetGeometry(),
rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates,
rMPMModelPart.GetProcessInfo(), is_found);
if (is_found)
{
condition_itr->GetGeometry() = r_found_geom;
for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j)
r_found_geom[j].Set(ACTIVE);
}
else
{
// Shared vector: guard concurrent push_back
#pragma omp critical
rMissingConditions.push_back(&*condition_itr);
}
}
}
}
inline bool IsFixExplicitAndOnElementEdge(const Vector& N, const ProcessInfo& rProcessInfo)
{
if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
// check if MP is exactly on the edge of the element, this gives spurious strains in explicit
for (SizeType i = 0; i < N.size(); ++i) {
if (std::abs(N[i]) < std::numeric_limits<double>::epsilon()) {
return true;
}
}
}
}
return false;
}
// Fallback search: for every element/condition that the fast neighbour search
// could not place, locate the containing background grid element with a
// BinBasedFastPointLocator and rebind/activate it. Entities that still cannot
// be found are deactivated, their geometry cleared, and flagged TO_ERASE.
// NOTE(review): the shape-function Vector N is declared outside the parallel
// region and is therefore shared between threads while FindPointOnMesh writes
// it -- looks like a data race; confirm.
template <std::size_t TDimension>
void BinBasedSearchElementsAndConditions(ModelPart& rMPMModelPart,
ModelPart& rBackgroundGridModelPart,
std::vector<typename Element::Pointer>& rMissingElements,
std::vector<typename Condition::Pointer>& rMissingConditions,
const std::size_t MaxNumberOfResults, const double Tolerance)
{
const ProcessInfo& r_process_info = rBackgroundGridModelPart.GetProcessInfo();
// PQMPM: partitioned quadrature MPM (material points split across elements).
bool is_pqmpm = (r_process_info.Has(IS_PQMPM))
? r_process_info.GetValue(IS_PQMPM) : false;
// Search background grid and make element active
Vector N;
const int max_result = 1000;
#pragma omp parallel
{
// Each thread builds its own point locator over the background grid.
BinBasedFastPointLocator<TDimension> SearchStructure(rBackgroundGridModelPart);
SearchStructure.UpdateSearchDatabase();
typename BinBasedFastPointLocator<TDimension>::ResultContainerType results(max_result);
// Element search and assign background grid
#pragma omp for
for (int i = 0; i < static_cast<int>(rMissingElements.size()); ++i) {
auto element_itr = *(rMissingElements.begin() + i);
std::vector<array_1d<double, 3>> xg;
element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rMPMModelPart.GetProcessInfo());
typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelem;
// FindPointOnMesh find the background element in which a given point falls and the relative shape functions
bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
if (is_found == true) {
if (IsFixExplicitAndOnElementEdge(N, r_process_info) && !is_pqmpm) {
// MP is exactly on the edge. Now we give it a little 'nudge'
array_1d<double, 3> xg_nudged = array_1d<double, 3>(xg[0]);
std::vector<array_1d<double, 3>> mp_vel;
element_itr->CalculateOnIntegrationPoints(MP_VELOCITY, mp_vel, rMPMModelPart.GetProcessInfo());
// Nudge by a small step (dt/1000) along the MP velocity, off the edge.
xg_nudged += r_process_info[DELTA_TIME] / 1000.0 * mp_vel[0];
if (SearchStructure.FindPointOnMesh(xg_nudged, N, pelem, result_begin, MaxNumberOfResults, Tolerance)) {
element_itr->SetValuesOnIntegrationPoints(MP_COORD, { xg_nudged }, rMPMModelPart.GetProcessInfo());
KRATOS_INFO("MPMSearchElementUtility") << "WARNING: To prevent spurious explicit stresses, Material Point "
<< element_itr->Id() << " was nudged." << std::endl;
} else {
// Nudge failed: fall back to the original position and accept the risk.
is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Material Point " << element_itr->Id()
<< " lies exactly on an element edge and may give spurious results." << std::endl;
}
}
pelem->Set(ACTIVE);
// NOTE(review): shadows the function-scope is_pqmpm computed above (same value).
const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM))
? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false;
if (is_pqmpm)
{
// Updates the quadrature point geometry.
(*element_itr).GetGeometry().SetGeometryParent((pelem->pGetGeometry().get()));
UpdatePartitionedQuadraturePoint(rBackgroundGridModelPart, xg[0],
*element_itr, pelem->pGetGeometry(), Tolerance);
}
else
{
// Rebind the quadrature point to the found element's geometry.
auto p_quadrature_point_geometry = element_itr->pGetGeometry();
array_1d<double, 3> local_coordinates;
p_quadrature_point_geometry->PointLocalCoordinates(local_coordinates, xg[0]);
CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
p_quadrature_point_geometry, local_coordinates,
p_quadrature_point_geometry->IntegrationPoints()[0].Weight(), pelem->GetGeometry());
}
// Activate the grid nodes now connected to this material point.
auto& r_geometry = element_itr->GetGeometry();
for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
r_geometry[j].Set(ACTIVE);
}
else {
KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point: " << element_itr->Id()
<< " is failed. Geometry is cleared." << std::endl;
element_itr->GetGeometry().clear();
element_itr->Reset(ACTIVE);
element_itr->Set(TO_ERASE);
}
}
// Condition search and assign background grid
#pragma omp for
for (int i = 0; i < static_cast<int>(rMissingConditions.size()); ++i) {
auto condition_itr = *(rMissingConditions.begin() + i);
std::vector<array_1d<double, 3>> xg;
condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo());
if (xg.size() > 0) {
// Only search for particle based BCs!
// Grid BCs are still applied on MP_model_part but we don't want to search for them.
typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelem;
// FindPointOnMesh find the background element in which a given point falls and the relative shape functions
bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
if (is_found == true) {
condition_itr->GetGeometry() = pelem->GetGeometry();
auto& r_geometry = condition_itr->GetGeometry();
for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
r_geometry[j].Set(ACTIVE);
} else {
KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point Condition: " << condition_itr->Id()
<< " is failed. Geometry is cleared." << std::endl;
condition_itr->GetGeometry().clear();
condition_itr->Reset(ACTIVE);
condition_itr->Set(TO_ERASE);
}
}
}
}
}
// Deactivates every background grid element and all of its nodes, so that the
// subsequent search pass re-activates only those hosting material points.
inline void ResetElementsAndNodes(ModelPart& rBackgroundGridModelPart)
{
    const int number_of_elements = static_cast<int>(rBackgroundGridModelPart.Elements().size());
    #pragma omp parallel for
    for (int index = 0; index < number_of_elements; ++index) {
        auto it_elem = rBackgroundGridModelPart.Elements().begin() + index;
        it_elem->Reset(ACTIVE);
        auto& r_geom = it_elem->GetGeometry();
        const IndexType number_of_nodes = r_geom.PointsNumber();
        for (IndexType node_index = 0; node_index < number_of_nodes; ++node_index)
            r_geom[node_index].Reset(ACTIVE);
    }
}
/**
* @brief Search element connectivity for each particle
* @details A search is performed to know in which grid element the material point falls.
* If one or more material points fall in the grid element, the grid element is
* set to be active and its connectivity is associated to the material point
* element.
* STEPS:
* 1) All the elements are set to be INACTIVE
* 2) A searching is performed and the grid elements which contain at least a MP are set to be ACTIVE
*
*/
template<std::size_t TDimension>
void SearchElement(ModelPart& rBackgroundGridModelPart, ModelPart& rMPMModelPart, const std::size_t MaxNumberOfResults,
    const double Tolerance)
{
    // Step 1: deactivate all grid elements and their nodes.
    ResetElementsAndNodes(rBackgroundGridModelPart);

    // Step 2: fast neighbour-based search; anything not found is collected
    // and handed to the slower bin-based locator below.
    std::vector<typename Element::Pointer> missing_elements;
    std::vector<typename Condition::Pointer> missing_conditions;
    NeighbourSearchElements(rMPMModelPart, rBackgroundGridModelPart, missing_elements, Tolerance);
    NeighbourSearchConditions(rMPMModelPart, rBackgroundGridModelPart, missing_conditions, Tolerance);

    // Step 3: fallback bin-based search, only when something is still missing.
    const bool anything_missing = !missing_elements.empty() || !missing_conditions.empty();
    if (anything_missing)
        BinBasedSearchElementsAndConditions<TDimension>(rMPMModelPart,
            rBackgroundGridModelPart, missing_elements, missing_conditions,
            MaxNumberOfResults, Tolerance);
}
} // end namespace MPMSearchElementUtility
} // end namespace Kratos
#endif // KRATOS_MPM_SEARCH_ELEMENT_UTILITY
|
array_section_use_device_ptr.c | // RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define FROM 64
#define LENGTH 128
// Maps only the section A[FROM:FROM+LENGTH) to the device, then checks that
// use_device_ptr(A) still translates the *base* pointer to a distinct device
// address (i.e. the runtime looks the mapping up by base, not by section start).
int main() {
float *A = (float *)malloc(N * sizeof(float));
#pragma omp target enter data map(to : A [FROM:LENGTH])
// A, has been mapped starting at index FROM, but inside the use_device_ptr
// clause it is captured by base so the library must look it up using the
// base address.
float *A_dev = NULL;
#pragma omp target data use_device_ptr(A)
{ A_dev = A; }
#pragma omp target exit data map(delete : A [FROM:LENGTH])
// CHECK: Success
// Success requires a translated pointer: non-NULL and different from the host A.
if (A_dev == NULL || A_dev == A)
fprintf(stderr, "Failure\n");
else
fprintf(stderr, "Success\n");
free(A);
return 0;
}
|
pr38704.c | // RUN: %libomptarget-compile-run-and-check-generic
// Clang 6.0 doesn't use the new map interface, undefined behavior when
// the compiler emits "old" interface code for structures.
// UNSUPPORTED: clang-6
#include <stdio.h>
#include <stdlib.h>
typedef struct {
int *ptr1;
int *ptr2;
} StructWithPtrs;
// Regression test (PR38704): struct pointer members mapped individually as
// sections. s is mapped on the target construct itself; s2 exercises
// enter/exit data with member maps.
int main(int argc, char *argv[]) {
StructWithPtrs s, s2;
s.ptr1 = malloc(sizeof(int));
s.ptr2 = malloc(2 * sizeof(int));
s2.ptr1 = malloc(sizeof(int));
s2.ptr2 = malloc(2 * sizeof(int));
#pragma omp target enter data map(to: s2.ptr2[0:1])
#pragma omp target map(s.ptr1[0:1], s.ptr2[0:2])
{
s.ptr1[0] = 1;
s.ptr2[0] = 2;
s.ptr2[1] = 3;
}
#pragma omp target exit data map(from: s2.ptr1[0:1], s2.ptr2[0:1])
// Values written on the device must be visible on the host after the region.
// CHECK: s.ptr1[0] = 1
// CHECK: s.ptr2[0] = 2
// CHECK: s.ptr2[1] = 3
printf("s.ptr1[0] = %d\n", s.ptr1[0]);
printf("s.ptr2[0] = %d\n", s.ptr2[0]);
printf("s.ptr2[1] = %d\n", s.ptr2[1]);
free(s.ptr1);
free(s.ptr2);
free(s2.ptr1);
free(s2.ptr2);
return 0;
}
|
GB_unop__minv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int64_int64)
// op(A') function: GB (_unop_tran__minv_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 64) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED (Ax [p], 64) for all entries: apply the int64
// "minv" unary operator. Generated code -- do not edit by hand.
GrB_Info GB (_unop_apply__minv_int64_int64)
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 64) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose A while applying the int64 "minv" unary operator.
// The whole kernel body lives in the shared template GB_unop_transpose.c,
// which consumes the GB_* macros defined above. Generated code -- do not edit.
GrB_Info GB (_unop_tran__minv_int64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-begin-declare-variant_4.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
#pragma omp begin declare variant match(device={kind(cpu)})
int also_before(void) { // cpu-specialized variant (inside begin/end declare variant); trailing comments only -- CHECK lines below depend on line/col positions
return 0;
}
#pragma omp end declare variant
int also_after(void) { // plain base definition; no variant is declared for this one
return 0;
}
int test() {
// Should return 0: both callees return 0 and the cpu variant of also_before is selected here.
return also_after() + also_before();
}
// Make sure:
// - we do see the ast nodes for the cpu kind
// - we pick the right callees
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <col:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_4:0x[a-z0-9]*]] <col:23, line:8:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_5:0x[a-z0-9]*]] <line:7:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_6:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:11:1, line:13:1> line:11:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:22, line:13:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:12:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: `-FunctionDecl [[ADDR_11:0x[a-z0-9]*]] <line:15:1, line:18:1> line:15:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_12:0x[a-z0-9]*]] <col:12, line:18:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_13:0x[a-z0-9]*]] <line:17:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_14:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-CallExpr [[ADDR_15:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_7]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: `-PseudoObjectExpr [[ADDR_18:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: |-CallExpr [[ADDR_19:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_20:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_21:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_22:0x[a-z0-9]*]] <line:6:1, line:17:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_23:0x[a-z0-9]*]] <line:6:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_2]] <col:1> 'int ({{.*}})' Function [[ADDR_3]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
|
vednnActivationForward.c |
#include <stdio.h>
#include <stdint.h>
#include "vednnActivationForward.h"
#ifdef VEDNN_USE_OPENMP
#include <stdint.h>
#include <omp.h>
extern int __vednn_omp_num_threads ;
#endif
/* Runs pFunc over nElements activation values, parallelising with OpenMP when
 * more than one thread is configured. Elements are split as evenly as
 * possible: the first (nElements % nthreads) threads take one extra element.
 * Per-thread return codes are OR-combined, so any failure bit survives. */
static inline vednnError_t
vednnActivationForward_wrapper(
vednnActivationForward_t pFunc,
const void *pDataIn,
void *pDataOut,
const uint64_t nElements
)
{
#ifdef VEDNN_USE_OPENMP
if ( __vednn_omp_num_threads == 1 ) {
/* Single-thread configuration: call directly, no partitioning. */
return pFunc(pDataIn, pDataOut, nElements) ;
}
else {
vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
{
int64_t nthreads = omp_get_num_threads() ;
int64_t threadid = omp_get_thread_num() ;
int64_t eachNElement = nElements / nthreads ;
int64_t remain = nElements % nthreads ;
/* Threads below `remain` each own one extra element; offsets shift accordingly. */
int64_t elementBegin = eachNElement * threadid + ( threadid < remain ? threadid : remain ) ;
int64_t myElement = eachNElement + ( threadid < remain ? 1 : 0 ) ;
if( myElement == 0 ) {
rc |= VEDNN_SUCCESS ;
}
else {
/* NOTE(review): offsets are applied in float* units, i.e. this
 * assumes 4-byte elements -- confirm for non-float data types. */
float* _pDataIn = ((float *)pDataIn) + elementBegin ;
float* _pDataOut = ((float *)pDataOut) + elementBegin ;
rc |= pFunc((void*)_pDataIn, (void*) _pDataOut, myElement) ;
}
}
return rc ;
}
#else
return pFunc(pDataIn, pDataOut, nElements) ;
#endif
}
/* ----------------------------------------------------------------------- */
/* Public entry point: dispatches on the activation mode and forwards to the
 * OpenMP-aware wrapper. Only ReLU is currently supported; any other mode is
 * reported on stderr and rejected. */
vednnError_t vednnActivationForward(
    const vednnActivationMode_t mode,
    const void *pDataIn,
    void *pDataOut,
    const uint64_t nElements
)
{
    if (mode == VEDNN_ACTIVATION_RELU) {
        return vednnActivationForward_wrapper(
            vednnActivationForward_Relu,
            pDataIn, pDataOut, nElements);
    }
    fprintf(stderr, "VEDNN Error : vednnActivationForward : Invalid Parameter !!\n");
    return VEDNN_ERROR_INVALID_PARAM;
}
|
boundary.c | /**
* @file boundary.c
* @brief Implementation of all boundary conditions.
* @author Hanno Rein <hanno@hanno-rein.de>
*
* @details The code supports different boundary conditions.
*
*
* @section LICENSE
* Copyright (c) 2015 Hanno Rein, Shangfei Liu
*
* This file is part of rebound.
*
* rebound is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* rebound is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with rebound. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include "particle.h"
#include "integrator.h"
#include "rebound.h"
#include "boundary.h"
#include "tree.h"
/* Enforces the simulation's boundary condition on every particle:
 * - OPEN:     particles leaving the box are removed from the simulation.
 * - SHEAR:    shearing-sheet wrapping with time-dependent azimuthal offsets
 *             and velocity shifts on radial crossings.
 * - PERIODIC: plain periodic wrapping in all three dimensions. */
void reb_boundary_check(struct reb_simulation* const r){
struct reb_particle* const particles = r->particles;
int N = r->N;
const struct reb_vec3d boxsize = r->boxsize;
switch(r->boundary){
case REB_BOUNDARY_OPEN:
for (int i=0;i<N;i++){ // forward loop; on removal, i and N are decremented below so the particle swapped into slot i is rechecked
int removep = 0;
if(particles[i].x>boxsize.x/2.){
removep = 1;
}
if(particles[i].x<-boxsize.x/2.){
removep = 1;
}
if(particles[i].y>boxsize.y/2.){
removep = 1;
}
if(particles[i].y<-boxsize.y/2.){
removep = 1;
}
if(particles[i].z>boxsize.z/2.){
removep = 1;
}
if(particles[i].z<-boxsize.z/2.){
removep = 1;
}
if (removep==1){
// If energy tracking is enabled, fold the energy removed with the
// particle into the global energy offset so totals stay consistent.
if(r->track_energy_offset){
double Ei = reb_tools_energy(r);
reb_remove(r, i,1);
r->energy_offset += Ei - reb_tools_energy(r);
} else {
reb_remove(r, i,0); // keepSorted=0 by default in C version
}
if (r->tree_root==NULL){
i--; // need to recheck the particle that replaced the removed one
N--; // This is the local N
}else{
// particle just marked, will be removed later
r->tree_needs_update= 1;
}
}
}
break;
case REB_BOUNDARY_SHEAR:
{
// The offset of ghostcell is time dependent.
const double OMEGA = r->ri_sei.OMEGA;
const double offsetp1 = -fmod(-1.5*OMEGA*boxsize.x*r->t+boxsize.y/2.,boxsize.y)-boxsize.y/2.;
const double offsetm1 = -fmod( 1.5*OMEGA*boxsize.x*r->t-boxsize.y/2.,boxsize.y)+boxsize.y/2.;
struct reb_particle* const particles = r->particles;
#pragma omp parallel for schedule(guided)
for (int i=0;i<N;i++){
// Radial: wrapping also shifts the azimuthal position and velocity.
while(particles[i].x>boxsize.x/2.){
particles[i].x -= boxsize.x;
particles[i].y += offsetp1;
particles[i].vy += 3./2.*OMEGA*boxsize.x;
}
while(particles[i].x<-boxsize.x/2.){
particles[i].x += boxsize.x;
particles[i].y += offsetm1;
particles[i].vy -= 3./2.*OMEGA*boxsize.x;
}
// Azimuthal
while(particles[i].y>boxsize.y/2.){
particles[i].y -= boxsize.y;
}
while(particles[i].y<-boxsize.y/2.){
particles[i].y += boxsize.y;
}
// Vertical (there should be no boundary, but periodic makes life easier)
while(particles[i].z>boxsize.z/2.){
particles[i].z -= boxsize.z;
}
while(particles[i].z<-boxsize.z/2.){
particles[i].z += boxsize.z;
}
}
}
break;
case REB_BOUNDARY_PERIODIC:
#pragma omp parallel for schedule(guided)
for (int i=0;i<N;i++){
while(particles[i].x>boxsize.x/2.){
particles[i].x -= boxsize.x;
}
while(particles[i].x<-boxsize.x/2.){
particles[i].x += boxsize.x;
}
while(particles[i].y>boxsize.y/2.){
particles[i].y -= boxsize.y;
}
while(particles[i].y<-boxsize.y/2.){
particles[i].y += boxsize.y;
}
while(particles[i].z>boxsize.z/2.){
particles[i].z -= boxsize.z;
}
while(particles[i].z<-boxsize.z/2.){
particles[i].z += boxsize.z;
}
}
break;
default:
break;
}
}
// Fallback ghostbox for unknown boundary types. Despite the name, it is
// zero-initialized (no shift), not NaN-filled.
const static struct reb_ghostbox nan_ghostbox = {.shiftx = 0, .shifty = 0, .shiftz = 0, .shiftvx = 0, .shiftvy = 0, .shiftvz = 0};
/* Returns the position/velocity shift ("ghostbox") for ghost cell (i,j,k).
 * For SHEAR boundaries the azimuthal shift is time dependent; for OPEN and
 * PERIODIC boundaries it is a static lattice shift. Unknown boundary types
 * fall back to the zero-shift nan_ghostbox. */
struct reb_ghostbox reb_boundary_get_ghostbox(struct reb_simulation* const r, int i, int j, int k){
switch(r->boundary){
case REB_BOUNDARY_OPEN:
{
struct reb_ghostbox gb;
gb.shiftx = r->boxsize.x*(double)i;
gb.shifty = r->boxsize.y*(double)j;
gb.shiftz = r->boxsize.z*(double)k;
gb.shiftvx = 0;
gb.shiftvy = 0;
gb.shiftvz = 0;
return gb;
}
case REB_BOUNDARY_SHEAR:
{
const double OMEGA = r->ri_sei.OMEGA;
struct reb_ghostbox gb;
// Ghostboxes have a finite velocity.
gb.shiftvx = 0.;
gb.shiftvy = -1.5*(double)i*OMEGA*r->boxsize.x;
gb.shiftvz = 0.;
// The shift in the y direction is time dependent.
double shift;
if (i==0){
shift = -fmod(gb.shiftvy*r->t,r->boxsize.y);
}else{
if (i>0){
shift = -fmod(gb.shiftvy*r->t-r->boxsize.y/2.,r->boxsize.y)-r->boxsize.y/2.;
}else{
shift = -fmod(gb.shiftvy*r->t+r->boxsize.y/2.,r->boxsize.y)+r->boxsize.y/2.;
}
}
gb.shiftx = r->boxsize.x*(double)i;
gb.shifty = r->boxsize.y*(double)j-shift;
gb.shiftz = r->boxsize.z*(double)k;
return gb;
}
case REB_BOUNDARY_PERIODIC:
{
struct reb_ghostbox gb;
gb.shiftx = r->boxsize.x*(double)i;
gb.shifty = r->boxsize.y*(double)j;
gb.shiftz = r->boxsize.z*(double)k;
gb.shiftvx = 0;
gb.shiftvy = 0;
gb.shiftvz = 0;
return gb;
}
default:
return nan_ghostbox;
}
}
/**
* @brief Checks if a given particle is within the computational domain.
* @param p reb_particle to be checked.
* @param r REBOUND simulation to consider
* @return Return value is 1 if particle is inside the box and 0 otherwise.
*/
int reb_boundary_particle_is_in_box(const struct reb_simulation* const r, struct reb_particle p){
    switch(r->boundary){
        case REB_BOUNDARY_OPEN:
        case REB_BOUNDARY_SHEAR:
        {
            // Outside if any coordinate exceeds half the box size in magnitude.
            const double half_x = r->boxsize.x/2.;
            const double half_y = r->boxsize.y/2.;
            const double half_z = r->boxsize.z/2.;
            if (p.x >  half_x || p.x < -half_x ||
                p.y >  half_y || p.y < -half_y ||
                p.z >  half_z || p.z < -half_z){
                return 0;
            }
            return 1;
        }
        default:
            // Periodic (and any other) boundaries wrap particles, so every
            // particle counts as inside the box.
            return 1;
    }
}
|
blockchain.c | /*********************************************************************
Homework 5
CS 110: Computer Architecture, Spring 2021
ShanghaiTech University
* Last Modified: 03/28/2021
*********************************************************************/
#include "blockchain.h"
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "hash_functions/sha256.h"
#include <stddef.h>
#include <stdio.h>
#define bwLittleEndian32(addr, x) ((*((WORD *)(addr))) = __builtin_bswap32(x))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/* Computes the SHA-256 midstate after absorbing the first 256 bytes of the
 * block (four full 64-byte chunks), so mining can continue hashing from this
 * state instead of re-hashing the constant prefix for every nonce.
 * NOTE(review): hashes exactly 256 bytes regardless of sizeof(blk_t) --
 * verify this matches the header layout assumed by the AVX mining path. */
void getSha256State(blk_t *node, WORD state[8]) {
SHA256_CTX ctx;
sha256_init(&ctx);
sha256_update(&ctx, (unsigned char *)node, 256);
memcpy(state, ctx.state, 8 * sizeof(WORD));
}
/* Initializes a block header: sets index and timestamp, resets the nonce,
 * zero-fills the 256-byte data field, then copies the previous block's hash
 * and at most 256 bytes of payload. Silently does nothing if any pointer
 * argument is NULL. */
void blockchain_node_init(blk_t *node, uint32_t index, uint32_t timestamp,
unsigned char prev_hash[32], unsigned char *data,
size_t data_size) {
if (!node || !data || !prev_hash)
return;
node->header.index = index;
node->header.timestamp = timestamp;
node->header.nonce = -1; /* sentinel: mining has not produced a nonce yet */
memset(node->header.data, 0, sizeof(unsigned char) * 256);
memcpy(node->header.prev_hash, prev_hash, HASH_BLOCK_SIZE);
memcpy(node->header.data, data,
sizeof(unsigned char) * ((data_size < 256) ? data_size : 256));
}
/* Hashes the header of `node` into hash_buf using the supplied hash function.
 * A NULL node is ignored and hash_buf is left untouched. */
void blockchain_node_hash(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE],
                          hash_func func) {
    if (!node)
        return;
    func((unsigned char *)node, sizeof(blkh_t), (unsigned char *)hash_buf);
}
/* Verifies a block against its predecessor: the stored hash must match the
 * recomputed hash of `node`, and node's prev_hash must match the recomputed
 * hash of `prev_node`. Returns False for NULL inputs or any mismatch. */
BOOL blockchain_node_verify(blk_t *node, blk_t *prev_node, hash_func func) {
    unsigned char computed[HASH_BLOCK_SIZE];
    const size_t hash_bytes = sizeof(unsigned char) * HASH_BLOCK_SIZE;
    if (node == NULL || prev_node == NULL)
        return False;
    /* This block's stored hash must equal its recomputed hash... */
    blockchain_node_hash(node, computed, func);
    if (memcmp(node->hash, computed, hash_bytes) != 0)
        return False;
    /* ...and the chained prev_hash must equal the previous block's hash. */
    blockchain_node_hash(prev_node, computed, func);
    if (memcmp(node->header.prev_hash, computed, hash_bytes) != 0)
        return False;
    return True;
}
#define THREAD_NUM 20
#define NONCE_STEP (THREAD_NUM)
#define USE_AVX 1
#ifndef USE_AVX
#define USE_AVX 0
#endif
struct __attribute__((aligned(64))) ThreadData {
blkh_t header;
unsigned char hash_buf[HASH_BLOCK_SIZE];
int flag;
};
struct __attribute__((aligned(64))) ThreadDataAvx {
uint64_t nonce;
unsigned char hash_buf[HASH_BLOCK_SIZE];
int flag;
};
// extern void sha256_transform(SHA256_CTX *ctx, const BYTE data[]);
extern void transform8way(WORD state[8], const unsigned char *data, uint64_t nonce, WORD outputState[8][8]);
/* Mines `node`: searches for a nonce whose SHA-256 block hash has at least
 * `diff` leading zero bits, writing the winning hash into hash_buf and into
 * node->hash. Work is split over THREAD_NUM OpenMP threads, each probing a
 * strided nonce sequence. Two strategies: a scalar midstate path, and an
 * 8-way AVX path used when the header layout is exactly 304 bytes.
 * NOTE(review): any_find_flag is read/written by all threads without atomics;
 * presumably intended as a best-effort stop signal -- confirm. */
void blockchain_node_mine(blk_t *node, unsigned char hash_buf[HASH_BLOCK_SIZE],
size_t diff, hash_func func) {
if (!USE_AVX || sizeof(blkh_t) != 304) {
/* Scalar path: per-thread SHA-256 resumed from a shared midstate over
 * the header bytes that precede the nonce. */
int any_find_flag = 0, i;
unsigned char one_diff[HASH_BLOCK_SIZE];
size_t diff_q, diff_m;
diff_q = diff / 8;
diff_m = diff % 8;
/* one_diff = largest hash that still satisfies the difficulty:
 * diff_q zero bytes, one partial byte, then 0xFF bytes. */
memset(one_diff, 0xFF, sizeof(unsigned char) * HASH_BLOCK_SIZE);
memset(one_diff, 0, sizeof(unsigned char) * diff_q);
one_diff[diff_q] = ((uint8_t)0xFF) >> diff_m;
struct ThreadData thData[THREAD_NUM];
for (i = 0; i < THREAD_NUM; ++i) thData[i].flag = 0;
const int BEFORE_SIZE = offsetof(blkh_t, nonce);
const int AFTER_SIZE = sizeof(blkh_t) - BEFORE_SIZE - 8;
/* Hash the constant prefix once; each attempt re-hashes only nonce + tail. */
SHA256_CTX ctx;
sha256_init(&ctx);
sha256_update(&ctx, (unsigned char *)node, BEFORE_SIZE);
#pragma omp parallel num_threads(THREAD_NUM)
{
const int id = omp_get_thread_num();
memcpy(&thData[id].header, node, sizeof(blkh_t));
thData[id].header.nonce = id; /* stride start: id, id+NONCE_STEP, ... */
SHA256_CTX curCtx;
for (; unlikely(!any_find_flag);) {
memcpy(&curCtx, &ctx, sizeof(SHA256_CTX));
sha256_update(&curCtx, (BYTE *)&thData[id].header.nonce, 8);
sha256_update(&curCtx, ((BYTE *)&thData[id].header) + BEFORE_SIZE + 8, AFTER_SIZE);
sha256_final(&curCtx, thData[id].hash_buf);
// blockchain_node_hash((blk_t *)&thData[id].header, thData[id].hash_buf, func);
/* hash <= one_diff  <=>  difficulty is satisfied */
if (unlikely(likely((!memcmp(thData[id].hash_buf, one_diff, sizeof(unsigned char) * diff_q))) &&
unlikely(memcmp(&thData[id].hash_buf[diff_q], &one_diff[diff_q],
sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0))) {
any_find_flag = 1;
thData[id].flag = 1;
break;
}
thData[id].header.nonce += NONCE_STEP;
}
}
/* Copy the first winner's nonce and hash back into the node. */
for (i = 0; i < THREAD_NUM; ++i) {
if (thData[i].flag) {
node->header.nonce = thData[i].header.nonce;
memcpy(hash_buf, thData[i].hash_buf, HASH_BLOCK_SIZE);
memcpy(node->hash, thData[i].hash_buf, HASH_BLOCK_SIZE);
break;
}
}
(void)func;
} else {
/* AVX path: transform8way hashes 8 consecutive nonces per call, resuming
 * from the 256-byte midstate computed by getSha256State. */
int any_find_flag = 0, i;
unsigned char one_diff[HASH_BLOCK_SIZE];
size_t diff_q, diff_m;
diff_q = diff / 8;
diff_m = diff % 8;
memset(one_diff, 0xFF, sizeof(unsigned char) * HASH_BLOCK_SIZE);
memset(one_diff, 0, sizeof(unsigned char) * diff_q);
one_diff[diff_q] = ((uint8_t)0xFF) >> diff_m;
struct ThreadDataAvx thData[THREAD_NUM];
for (i = 0; i < THREAD_NUM; ++i) thData[i].flag = 0;
WORD state[8];
getSha256State(node, state);
/* Build the final 64-byte SHA-256 chunk by hand: remaining 40 header
 * bytes, 0x80 padding byte, zeros, and the big-endian bit length. */
unsigned char ctxData[64];
memcpy(ctxData, ((BYTE *)node) + 256, 40);
ctxData[48] = 0x80;
memset(ctxData + 48 + 1, 0, 55 + 6 - 48);
// bitlen = 4 * 512
// datalen = 48
// bitlen += datalen * 8
// bitlen = 2432
ctxData[63] = (BYTE)2432;
ctxData[62] = (BYTE)(2432 >> 8);
#pragma omp parallel num_threads(THREAD_NUM)
{
const int id = omp_get_thread_num();
thData[id].nonce = id * 8; /* each thread owns batches of 8 nonces */
// SHA256_CTX curCtx;
// unsigned char curData[64];
// memcpy(curData, ctxData, 64);
WORD outputState[8][8];
for (; !any_find_flag;) {
// *(uint64_t *)(curData + 40) = thData[id].nonce;
// memcpy(curCtx.state, state, sizeof(state));
// sha256_transform(&curCtx, curData);
// bwLittleEndian32(thData[id].hash_buf, curCtx.state[0]);
// bwLittleEndian32(thData[id].hash_buf + 4, curCtx.state[1]);
// bwLittleEndian32(thData[id].hash_buf + 8, curCtx.state[2]);
// bwLittleEndian32(thData[id].hash_buf + 12, curCtx.state[3]);
// bwLittleEndian32(thData[id].hash_buf + 16, curCtx.state[4]);
// bwLittleEndian32(thData[id].hash_buf + 20, curCtx.state[5]);
// bwLittleEndian32(thData[id].hash_buf + 24, curCtx.state[6]);
// bwLittleEndian32(thData[id].hash_buf + 28, curCtx.state[7]);
// if ((!memcmp(thData[id].hash_buf, one_diff, sizeof(unsigned char) * diff_q)) &&
// memcmp(&thData[id].hash_buf[diff_q], &one_diff[diff_q],
// sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
// any_find_flag = 1;
// thData[id].flag = 1;
// break;
// }
// thData[id].nonce += NONCE_STEP;
transform8way(state, ctxData, thData[id].nonce, outputState);
for (int now = 0; now < 8; ++now) {
/* Reassemble lane `now` of the 8-way result into a byte hash. */
bwLittleEndian32(thData[id].hash_buf, outputState[0][now]);
bwLittleEndian32(thData[id].hash_buf + 4, outputState[1][now]);
bwLittleEndian32(thData[id].hash_buf + 8, outputState[2][now]);
bwLittleEndian32(thData[id].hash_buf + 12, outputState[3][now]);
bwLittleEndian32(thData[id].hash_buf + 16, outputState[4][now]);
bwLittleEndian32(thData[id].hash_buf + 20, outputState[5][now]);
bwLittleEndian32(thData[id].hash_buf + 24, outputState[6][now]);
bwLittleEndian32(thData[id].hash_buf + 28, outputState[7][now]);
if ((!memcmp(thData[id].hash_buf, one_diff, sizeof(unsigned char) * diff_q)) &&
memcmp(&thData[id].hash_buf[diff_q], &one_diff[diff_q],
sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
/* NOTE(review): maps lane `now` back to its nonce; assumes
 * transform8way orders lanes as base+7-now -- confirm. */
thData[id].nonce += 7 - now;
any_find_flag = 1;
thData[id].flag = 1;
break;
}
}
if (thData[id].flag) break;
thData[id].nonce += NONCE_STEP * 8;
}
}
/* Copy the first winner's nonce and hash back into the node. */
for (i = 0; i < THREAD_NUM; ++i) {
if (thData[i].flag) {
node->header.nonce = thData[i].nonce;
memcpy(hash_buf, thData[i].hash_buf, HASH_BLOCK_SIZE);
memcpy(node->hash, hash_buf, HASH_BLOCK_SIZE);
break;
}
}
(void)func;
}
// while (True) {
// blockchain_node_hash(node, hash_buf, func);
// if ((!memcmp(hash_buf, one_diff, sizeof(unsigned char) * diff_q)) &&
// memcmp(&hash_buf[diff_q], &one_diff[diff_q],
// sizeof(unsigned char) * (HASH_BLOCK_SIZE - diff_q)) <= 0) {
// memcpy(node->hash, hash_buf, sizeof(unsigned char) * HASH_BLOCK_SIZE);
// break;
// }
// node->header.nonce++;
// }
// printf("nonce: %lu\n", node->header.nonce);
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel data. Linking with zlib
// is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// Parsed EXR version field and format flags read from the start of the file.
typedef struct _EXRVersion {
  int version;    // EXR version number; this must be 2
  int tiled;      // nonzero if the part uses the tile format image
  int long_name;  // nonzero if the long-name attribute flag is set
  int non_image;  // nonzero for deep image data (EXR 2.0)
  int multipart;  // nonzero for a multi-part file (EXR 2.0)
} EXRVersion;
// One EXR header attribute: a named, typed, variable-size blob of bytes.
typedef struct _EXRAttribute {
  char name[256];        // attribute name; name and type are up to 255 chars long.
  char type[256];        // attribute type string (e.g. as stored in the file)
  unsigned char *value;  // raw attribute value bytes (uint8_t*)
  int size;              // number of bytes in `value`
  int pad0;              // padding for alignment
} EXRAttribute;
// Per-channel description, as parsed from the header's `channels` attribute.
typedef struct _EXRChannelInfo {
  char name[256];          // channel name; less than 255 bytes long
  int pixel_type;          // storage pixel type (TINYEXR_PIXELTYPE_*)
  int x_sampling;          // horizontal subsampling factor
  int y_sampling;          // vertical subsampling factor
  unsigned char p_linear;  // pLinear flag from the channel definition
                           // (perceptual-linearity hint — see OpenEXR spec)
  unsigned char pad[3];    // padding for alignment
} EXRChannelInfo;
// One decoded tile of a tiled EXR image.
typedef struct _EXRTile {
  int offset_x;            // tile position within its level (x)
  int offset_y;            // tile position within its level (y)
  int level_x;             // mip/rip level index in x
  int level_y;             // mip/rip level index in y
  int width;               // actual width in a tile.
  int height;              // actual height in a tile.
  unsigned char **images;  // image[channels][pixels]
} EXRTile;
// Parsed single-part EXR header: windows, tiling description, channel list,
// compression settings and custom attributes.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;       // pixel width / pixel height
  int line_order;                 // scanline storage order
  int data_window[4];             // xmin, ymin, xmax, ymax (see OpenEXR layout spec)
  int display_window[4];          // xmin, ymin, xmax, ymax
  float screen_window_center[2];
  float screen_window_width;
  int chunk_count;                // number of chunks in this part

  // Properties for tiled format(`tiledesc`).
  int tiled;                      // nonzero if this part is tiled
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;            // TINYEXR_TILE_ONE_LEVEL / _MIPMAP_LEVELS / _RIPMAP_LEVELS
  int tile_rounding_mode;         // TINYEXR_TILE_ROUND_DOWN / _ROUND_UP

  int long_name;                  // long-name attribute flag
  int non_image;                  // deep image flag (EXR 2.0)
  int multipart;                  // multi-part flag (EXR 2.0)
  unsigned int header_len;        // byte length of the header in the file

  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;
// Collection of per-part headers for a multi-part EXR file.
typedef struct _EXRMultiPartHeader {
  int num_headers;     // number of entries in `headers`
  EXRHeader *headers;  // array of part headers, size = `num_headers`
} EXRMultiPartHeader;
// Decoded pixel data for one EXR part. Exactly one of `tiles` (tiled format)
// or `images` (scanline format) is non-NULL.
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.
  int width;
  int height;
  int num_channels;
  // Properties for tile format.
  int num_tiles;  // number of entries in `tiles`
} EXRImage;
// Collection of decoded images for a multi-part EXR file.
typedef struct _EXRMultiPartImage {
  int num_images;    // number of entries in `images`
  EXRImage *images;  // array of part images, size = `num_images`
} EXRMultiPartImage;
// Decoded deep EXR image (variable number of samples per pixel).
typedef struct _DeepImage {
  const char **channel_names;  // [num_channels] channel name strings
  float ***image;              // image[channels][scanlines][samples]
  int **offset_table;          // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;                    // padding for alignment
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Return zero and will set error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
//#include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occured in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
faiiled
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
// Seed value for an adler-32 computation (adler-32 of the empty string).
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL; otherwise it folds buf_len bytes at ptr into the running checksum.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
// Seed value for a CRC-32 computation.
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL; otherwise it folds buf_len bytes at ptr into the running CRC.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies (zlib-compatible values).
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};
// Method: Deflate is the only compression method supported.
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);
// Library version identification, mirroring zlib's ZLIB_VERSION/ZLIB_VERNUM.
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard (zlib has no direct
// equivalent); the rest match zlib's Z_* codes.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,
  MZ_BEST_SPEED = 1,
  MZ_BEST_COMPRESSION = 9,
  MZ_UBER_COMPRESSION = 10,
  MZ_DEFAULT_LEVEL = 6,
  MZ_DEFAULT_COMPRESSION = -1
};
// Window bits: log2 of the LZ dictionary size (32KB), zlib's MAX_WBITS.
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct. Layout mirrors zlib's z_stream so
// the two can be used interchangeably via the compatibility #defines below.
typedef struct mz_stream_s {
  const unsigned char *next_in; // pointer to next byte to read
  unsigned int avail_in;        // number of bytes available at next_in
  mz_ulong total_in;            // total number of bytes consumed so far
  unsigned char *next_out;      // pointer to next byte to write
  unsigned int avail_out;       // number of bytes that can be written to next_out
  mz_ulong total_out;           // total number of bytes produced so far
  char *msg;                    // error msg (unused)
  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
  mz_alloc_func
      zalloc;          // optional heap allocation function (defaults to malloc)
  mz_free_func zfree;  // optional heap free function (defaults to free)
  void *opaque;        // heap alloc function user pointer
  int data_type;       // data_type (unused)
  mz_ulong adler;      // adler32 of the source or uncompressed data
  mz_ulong reserved;   // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
//  pStream must point to an initialized mz_stream struct.
//  level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
//  level 1 enables a specially optimized compression function that's been
//  optimized purely for performance, not ratio.
//  (This special func. is currently only enabled when
//  MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
//  MZ_PARAM_ERROR if the input parameters are bogus.
//  MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
//   method must be MZ_DEFLATED
//   window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
//   zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
//   header or footer)
//   mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must initialize/update
//   the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
//   MZ_FINISH.
// Return values:
//   MZ_OK on success (when flushing, or if more input is needed but not
//   available, and/or there's more output to be written but the output buffer
//   is full).
//   MZ_STREAM_END if all input has been consumed and all output bytes have been
//   written. Don't call mz_deflate() on the stream anymore.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input and/or
//   output buffers are empty. (Fill up the input buffer or free up some output
//   space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// On entry *pDest_len holds the capacity of pDest; on success it is updated to
// the number of compressed bytes written.
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must initialize/update
//   the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
//   On the first call, if flush is MZ_FINISH it's assumed the input and output
//   buffers are both sized large enough to decompress the entire stream in a
//   single call (this is slightly faster).
//   MZ_FINISH implies that there are no more source bytes available beside
//   what's already in the input buffer, and that the output buffer is large
//   enough to hold the rest of the decompressed data.
// Return values:
//   MZ_OK on success. Either more input is needed but not available, and/or
//   there's more output to be written but the output buffer is full.
//   MZ_STREAM_END if all needed input has been consumed and all output bytes
//   have been written. For zlib streams, the adler-32 of the decompressed data
//   has also been verified.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_DATA_ERROR if the deflate stream is invalid.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input buffer is
//   empty but the inflater needs more input to continue, or if the output
//   buffer is not large enough. Call mz_inflate() again
//   with more input data, or with more room in the output buffer (except when
//   using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// On entry *pDest_len holds the capacity of pDest; on success it is updated to
// the number of decompressed bytes written.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// zlib-compatible basic typedefs.
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
// zlib-compatible constant and function name aliases.
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
// Fixed-width integer typedefs. NOTE(review): these assume the common
// ILP32/LP64/LLP64 models (int is 32 bits, long long is 64 bits); they are not
// derived from <stdint.h>.
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
// Buffer and field size limits used throughout the archive code.
enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
// Detailed information about a single archive entry, filled in by
// mz_zip_reader_file_stat(). Field names follow the ZIP central directory
// record layout.
typedef struct {
  mz_uint32 m_file_index;      // index of this entry within the archive
  mz_uint32 m_central_dir_ofs; // offset of the entry's central directory record
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;          // compression method (e.g. MZ_DEFLATED)
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;           // CRC-32 of the uncompressed data
  mz_uint64 m_comp_size;       // compressed size in bytes
  mz_uint64 m_uncomp_size;     // uncompressed size in bytes
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
// User-pluggable I/O callbacks: read/write n bytes at absolute offset
// file_ofs; each returns the number of bytes actually transferred.
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
// Current mode of an mz_zip_archive object.
typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
// Archive object. Zero-initialize, then pass to one of the mz_zip_*_init()
// functions; the alloc/IO callbacks may be overridden before init.
typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;
  mz_uint m_file_offset_alignment;
  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;
  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;
  mz_zip_internal_state *m_pState;
} mz_zip_archive;
// Flags accepted (OR'd into the flags/level_and_flags parameters) by the
// reader/writer functions below.
typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
// Determines if an archive file entry is encrypted.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer; the caller
// must mz_free() the returned block.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
// Extended variant: also accepts an optional comment, and (when adding
// pre-compressed data via MZ_ZIP_FLAG_COMPRESSED_DATA) the uncompressed
// size/crc32 to record in the entry's headers.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);
// Reads a single file from an archive into a heap block; the caller must
// mz_free() the returned block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
  TINFL_FLAG_HAS_MORE_INPUT = 2,
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
  TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
//  to decompress.
// On return:
//  Function returns a pointer to the decompressed data, or NULL on failure.
//  *pOut_len will be set to the decompressed data's size, which could be larger
//  than src_buf_len on uncompressible data.
//  The caller must call mz_free() on the returned block when it's no longer
//  needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status. Negative values are errors; TINFL_STATUS_NEEDS_MORE_INPUT and
// TINFL_STATUS_HAS_MORE_OUTPUT mean the coroutine should be resumed.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,
  TINFL_STATUS_ADLER32_MISMATCH = -2,
  TINFL_STATUS_FAILED = -1,
  TINFL_STATUS_DONE = 0,
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,
  TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state (resetting m_state is
// sufficient; the coroutine reinitializes everything else on entry).
#define tinfl_init(r)                                                          \
  do {                                                                         \
    (r)->m_state = 0;                                                          \
  }                                                                            \
  MZ_MACRO_END
// Returns the adler-32 computed so far for the decompressed data.
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
  TINFL_MAX_HUFF_TABLES = 3,
  TINFL_MAX_HUFF_SYMBOLS_0 = 288,
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,
  TINFL_FAST_LOOKUP_BITS = 10,
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// Huffman decoding table: a direct lookup array for short codes plus a binary
// tree for the rest.
typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#ifndef MINIZ_HAS_64BIT_REGISTERS
#  define MINIZ_HAS_64BIT_REGISTERS 0
#endif
// Choose the bit-buffer width: a 64-bit buffer needs fewer refills when the
// target has fast 64-bit registers.
#ifndef TINFL_USE_64BIT_BITBUF
#  if MINIZ_HAS_64BIT_REGISTERS
#    define TINFL_USE_64BIT_BITBUF 1
#  else
#    define TINFL_USE_64BIT_BITBUF 0
#  endif
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Full decompressor state; persisted across tinfl_decompress() coroutine
// resumptions.
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
  TDEFL_HUFFMAN_ONLY = 0,
  TDEFL_DEFAULT_MAX_PROBES = 128,
  TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
  TDEFL_COMPUTE_ADLER32 = 0x02000,
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
  TDEFL_RLE_MATCHES = 0x10000,
  TDEFL_FILTER_MATCHES = 0x20000,
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
//  pSrc_buf, src_buf_len: Pointer and size of source block to compress.
//  flags: The max match finder probes (default is 128) logically OR'd against
//  the above flags. Higher probes are slower but improve compression.
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pOut_len will be set to the compressed data's size, which could be larger
//  than src_buf_len on uncompressible data.
//  The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
//  pImage, w, h, and num_chans describe the image to compress. num_chans may be
//  1, 2, 3, or 4.
//  The image pitch in bytes per scanline will be w*num_chans. The leftmost
//  pixel on the top scanline is stored first in memory.
//  level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
//  MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
//  If flip is true, the image will be flipped on the Y axis (useful for OpenGL
//  apps).
// On return:
//  Function returns a pointer to the compressed data, or NULL on failure.
//  *pLen_out will be set to the size of the PNG image file.
//  The caller must mz_free() the returned heap block (which will typically be
//  larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip);
// Convenience wrapper using default level and no flipping.
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
                                          void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);
// Internal tdefl limits fixed by the deflate format: Huffman table counts,
// per-table symbol counts, and LZ dictionary geometry.
enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288, // literal/length alphabet
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,  // distance alphabet
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,  // code-length alphabet
  TDEFL_LZ_DICT_SIZE = 32768,     // deflate's 32KB sliding window
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
// The TDEFL_LESS_MEMORY build shrinks the LZ code buffer and the match-finder
// hash table; the other constants are identical in both builds.
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
// Return codes for the low-level tdefl API. Negative values are errors;
// TDEFL_STATUS_DONE means the stream has been completely flushed.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure. All buffers are embedded (no heap
// allocation), which is why the low-level API needs no deinit function.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func; // optional output callback
  void *m_pPut_buf_user;                  // user pointer passed to the callback
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  // Saved match state for resuming compression across calls.
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  // Dictionary is over-allocated by MAX_MATCH_LEN-1 bytes — presumably so a
  // match can be read past the nominal end without wrap checks; TODO confirm.
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
// Compile-time size checks: the array size evaluates to -1 (a compile error)
// if a fixed-width typedef does not have the expected size.
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
// With MINIZ_NO_MALLOC, allocation always fails and free/realloc are no-ops.
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
// NOTE: MZ_MAX/MZ_MIN evaluate their arguments more than once; do not pass
// expressions with side effects.
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
// Computes the zlib-style Adler-32 checksum of a buffer. 'adler' is the
// running checksum (start from MZ_ADLER32_INIT); a NULL ptr returns the
// initial value. Returns the updated checksum.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 a = (mz_uint32)(adler & 0xffff);
  mz_uint32 b = (mz_uint32)(adler >> 16);
  mz_uint32 i;
  // Work in chunks of at most 5552 bytes so the 32-bit partial sums cannot
  // overflow before the modulo-65521 reduction.
  size_t chunk = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    // Unrolled 8-at-a-time inner loop.
    for (i = 0; i + 7 < chunk; i += 8, ptr += 8) {
      a += ptr[0]; b += a;
      a += ptr[1]; b += a;
      a += ptr[2]; b += a;
      a += ptr[3]; b += a;
      a += ptr[4]; b += a;
      a += ptr[5]; b += a;
      a += ptr[6]; b += a;
      a += ptr[7]; b += a;
    }
    for (; i < chunk; ++i) {
      a += *ptr++;
      b += a;
    }
    a %= 65521U;
    b %= 65521U;
    buf_len -= chunk;
    chunk = 5552;
  }
  return (b << 16) + a;
}
// CRC-32 (zlib polynomial) using a compact 16-entry nibble table.
// Based on Karl Malbrain's compact CRC-32 ("A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed",
// http://www.geocities.com/malbrain/). A NULL ptr returns the initial value.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
      0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 state = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  state = ~state; // CRC runs with inverted state
  while (buf_len--) {
    const mz_uint8 byte = *ptr++;
    // Consume the low nibble, then the high nibble.
    state = (state >> 4) ^ s_crc32[(state ^ byte) & 0xF];
    state = (state >> 4) ^ s_crc32[(state ^ (byte >> 4)) & 0xF];
  }
  return ~state;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc callback: ignores 'opaque' and forwards to MZ_MALLOC.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque;
  (void)items;
  (void)size;
  return MZ_MALLOC(items * size);
}
// Default zfree callback: ignores 'opaque' and forwards to MZ_FREE.
static void def_free_func(void *opaque, void *address) {
  (void)opaque;
  (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
// zlib-compatible deflateInit(): initializes pStream for compression at the
// given level with the default method, window size, and strategy.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS,
                         9, MZ_DEFAULT_STRATEGY);
}
// zlib-compatible deflateInit2(): validates parameters, resets the stream
// bookkeeping, allocates a tdefl_compressor through the stream's allocator,
// and binds it to pStream->state.
// method must be MZ_DEFLATED; mem_level must be in [1,9] (accepted for zlib
// compatibility); window_bits must be MZ_DEFAULT_WINDOW_BITS (zlib stream)
// or its negation (raw deflate).
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install the default malloc/free-based callbacks if the caller set none.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// zlib-compatible deflateReset(): rewinds the stream so it can compress a new
// buffer, preserving the compressor's original flags.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  pComp = (tdefl_compressor *)pStream->state;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// zlib-compatible deflate(): compresses as much of next_in as possible into
// next_out, honoring the requested flush mode. Returns MZ_OK, MZ_STREAM_END
// (stream finished), or a zlib-style error code.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  // tdefl has no partial-flush mode; treat it as a sync flush.
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  // Remember the starting totals to detect lack of forward progress below.
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // tdefl_compress() updated in_bytes/out_bytes to the amounts actually
    // consumed/produced; advance the stream pointers and counters to match.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// zlib-compatible deflateEnd(): releases the compressor state, if any.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (!pStream->state) return MZ_OK;
  pStream->zfree(pStream->opaque, pStream->state);
  pStream->state = NULL;
  return MZ_OK;
}
// zlib-compatible deflateBound(): conservative upper bound on the compressed
// size of source_len bytes. (Deliberately very loose — a tight bound is hard
// to compute given the way tdefl's blocking works.)
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong pct_bound = 128 + (source_len * 110) / 100;
  mz_ulong block_bound =
      128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(pct_bound, block_bound);
}
// zlib-compatible compress2(): one-shot compression of pSource into pDest.
// On entry *pDest_len is the capacity of pDest; on success it receives the
// compressed size. Returns MZ_OK or a zlib-style error code.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  mz_stream strm;
  int rc;
  memset(&strm, 0, sizeof(strm));
  // The stream counters are 32-bit; reject larger requests in case mz_ulong
  // is a 64-bit type.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  rc = mz_deflateInit(&strm, level);
  if (rc != MZ_OK) return rc;
  rc = mz_deflate(&strm, MZ_FINISH);
  if (rc != MZ_STREAM_END) {
    mz_deflateEnd(&strm);
    return (rc == MZ_OK) ? MZ_BUF_ERROR : rc;
  }
  *pDest_len = strm.total_out;
  return mz_deflateEnd(&strm);
}
// zlib-compatible compress(): mz_compress2() at the default compression level.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// zlib-compatible compressBound(): worst-case compressed size for source_len.
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Internal state for the zlib-style inflate wrapper: a tinfl decompressor
// plus a ring dictionary that windows the decompressed output.
typedef struct {
  tinfl_decompressor m_decomp;
  // m_dict_ofs/m_dict_avail track the buffered-but-undelivered output bytes.
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits; // > 0: parse zlib header; negative: raw deflate
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;
// zlib-compatible inflateInit2(): window_bits must be MZ_DEFAULT_WINDOW_BITS
// (zlib stream) or its negation (raw deflate). Allocates the inflate_state
// through the stream's allocator and binds it to pStream->state.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  // Install the default heap-based callbacks if the caller supplied none.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (!pState) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  pState->m_window_bits = window_bits;
  return MZ_OK;
}
// zlib-compatible inflateInit(): mz_inflateInit2() with the default window.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// zlib-compatible inflate(): decompresses as much input as possible into
// next_out. Only MZ_NO_FLUSH, MZ_SYNC_FLUSH, and MZ_FINISH are supported
// (MZ_PARTIAL_FLUSH is mapped to MZ_SYNC_FLUSH). Returns MZ_OK,
// MZ_STREAM_END, or a zlib-style error code.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file. This path
    // decompresses directly into the caller's buffer, bypassing the dictionary.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // Drain any decompressed bytes still buffered in the dictionary from a
  // previous call before producing new output.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    // Decompress into the ring dictionary, then copy out as much as fits in
    // the caller's output buffer.
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// zlib-compatible inflateEnd(): releases the decompressor state, if any.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (!pStream->state) return MZ_OK;
  pStream->zfree(pStream->opaque, pStream->state);
  pStream->state = NULL;
  return MZ_OK;
}
// zlib-compatible uncompress(): one-shot decompression of pSource into pDest.
// On entry *pDest_len is the capacity of pDest; on success it receives the
// decompressed size. Returns MZ_OK or a zlib-style error code.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  int rc;
  mz_stream strm;
  memset(&strm, 0, sizeof(strm));
  // The stream counters are 32-bit; reject larger requests in case mz_ulong
  // is a 64-bit type.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  rc = mz_inflateInit(&strm);
  if (rc != MZ_OK) return rc;
  rc = mz_inflate(&strm, MZ_FINISH);
  if (rc != MZ_STREAM_END) {
    mz_inflateEnd(&strm);
    // A buffer error with all input consumed means the stream was truncated.
    return ((rc == MZ_BUF_ERROR) && (!strm.avail_in)) ? MZ_DATA_ERROR : rc;
  }
  *pDest_len = strm.total_out;
  return mz_inflateEnd(&strm);
}
// Maps a zlib-style status code to a short human-readable description.
// Returns "" for MZ_OK and NULL for unrecognized codes.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  const size_t num_descs = sizeof(s_error_descs) / sizeof(s_error_descs[0]);
  size_t idx;
  for (idx = 0; idx < num_descs; ++idx) {
    if (s_error_descs[idx].m_err == err) {
      return s_error_descs[idx].m_pDesc;
    }
  }
  return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// The decompressor is structured as a coroutine: TINFL_CR_RETURN stores a
// state index and jumps to common_exit; on re-entry, the switch opened by
// TINFL_CR_BEGIN resumes execution at the matching 'case' label embedded
// inside the macro.
#define TINFL_CR_BEGIN \
  switch (r->m_state) { \
    case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do { \
    status = result; \
    r->m_state = state_index; \
    goto common_exit; \
    case state_index:; \
  } \
  MZ_MACRO_END
// Returns the given status on this and every subsequent call — used for
// unrecoverable failures.
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do { \
    for (;;) { \
      TINFL_CR_RETURN(state_index, result); \
    } \
  } \
  MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
// Decompresses an entire deflate stream from memory into a heap-allocated
// buffer, growing the buffer geometrically until the stream completes.
// On success returns the malloc'd output (caller owns/frees it) and stores
// its length in *pOut_len; returns NULL (with *pOut_len == 0) on corrupt
// input, truncated input, or allocation failure.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pOut = NULL;
  size_t in_ofs = 0, capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_avail = src_buf_len - in_ofs, out_avail = capacity - *pOut_len;
    size_t grown_capacity;
    void *pGrown;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + in_ofs, &in_avail,
        (mz_uint8 *)pOut, pOut ? (mz_uint8 *)pOut + *pOut_len : NULL,
        &out_avail,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      // Corrupt stream, or it needs input beyond what we were given: fail.
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    in_ofs += in_avail;
    *pOut_len += out_avail;
    if (status == TINFL_STATUS_DONE) break;
    // Needs more output room: double the buffer, with a 128-byte floor.
    grown_capacity = capacity * 2;
    if (grown_capacity < 128) grown_capacity = 128;
    pGrown = MZ_REALLOC(pOut, grown_capacity);
    if (!pGrown) {
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    pOut = pGrown;
    capacity = grown_capacity;
  }
  return pOut;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
// Streams decompressed output to a user callback, using an internal
// TINFL_LZ_DICT_SIZE circular dictionary as the working buffer.
// Returns 1 on success and 0 on failure (callback refused data, or the
// stream was bad); *pIn_buf_size is updated to the bytes of input consumed.
// Note: returns TINFL_STATUS_FAILED if the dictionary cannot be allocated.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int succeeded = 0;
  size_t in_ofs = 0, dict_ofs = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_avail = *pIn_buf_size - in_ofs;
    size_t out_avail = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_ofs,
                         &in_avail, pDict, pDict + dict_ofs, &out_avail,
                         (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                                    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_ofs += in_avail;
    // Hand any newly produced bytes to the callback; abort if it refuses.
    if (out_avail &&
        !(*pPut_buf_func)(pDict + dict_ofs, (int)out_avail, pPut_buf_user))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      succeeded = (status == TINFL_STATUS_DONE);
      break;
    }
    // Advance the circular dictionary write position.
    dict_ofs = (dict_ofs + out_avail) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_ofs;
  return succeeded;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
// DEFLATE length/distance alphabet lookup tables (see RFC 1951 section
// 3.2.5). All are static const for fast init and thread safety.
//
// s_tdefl_len_sym[i]: literal/length symbol (257..285) for an encoded match
// length byte i. NOTE(review): the index is presumably the match length
// biased by -3 (the standard DEFLATE minimum match) -- confirm against the
// LZ recorder that fills m_lz_code_buf.
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    285};
// s_tdefl_len_extra[i]: number of extra bits transmitted after the length
// symbol for encoded match length byte i (masked via mz_bitmasks).
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
// s_tdefl_small_dist_sym[d]: distance symbol for match distances < 512,
// indexed directly by the distance (see tdefl_compress_lz_codes).
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
// s_tdefl_small_dist_extra[d]: extra-bit count matching the table above.
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
// s_tdefl_large_dist_sym[d >> 8]: distance symbol for distances >= 512,
// indexed by the distance's high byte (see tdefl_compress_lz_codes).
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
    24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
// s_tdefl_large_dist_extra[d >> 8]: extra-bit count matching the table above.
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// m_key initially holds a symbol's frequency; after
// tdefl_calculate_minimum_redundancy() it holds the symbol's code length
// (see tdefl_optimize_huffman_table).
typedef struct {
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
// Two-pass byte-wise radix sort of symbol/frequency pairs by m_key,
// ascending. Ping-pongs between pSyms0 and pSyms1 and returns whichever
// buffer holds the final sorted order; callers must not assume which one.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2], pass, shift, i, passes = 2;
  tdefl_sym_freq *pFrom = pSyms0, *pTo = pSyms1;
  MZ_CLEAR_OBJ(hist);
  // Build both per-byte histograms (low byte, high byte) in one sweep.
  for (i = 0; i < num_syms; i++) {
    mz_uint key = pSyms0[i].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  // Skip the high-byte pass entirely when every key's high byte is zero.
  while ((passes > 1) && (num_syms == hist[(passes - 1) * 256])) passes--;
  for (shift = 0, pass = 0; pass < passes; pass++, shift += 8) {
    const mz_uint32 *pCounts = &hist[pass << 8];
    mz_uint offsets[256], ofs = 0;
    tdefl_sym_freq *pSwap;
    // Prefix-sum the histogram into starting offsets for each bucket.
    for (i = 0; i < 256; i++) {
      offsets[i] = ofs;
      ofs += pCounts[i];
    }
    // Stable scatter into the other buffer, then swap roles.
    for (i = 0; i < num_syms; i++)
      pTo[offsets[(pFrom[i].m_key >> shift) & 0xFF]++] = pFrom[i];
    pSwap = pFrom;
    pFrom = pTo;
    pTo = pSwap;
  }
  return pFrom;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  /* In-place computation of minimum-redundancy (Huffman) code lengths.
     On entry, A[0..n-1].m_key holds symbol frequencies in ascending order
     (see tdefl_radix_sort_syms); on exit, A[i].m_key holds the code length
     for the i-th symbol. The Moffat/Katajainen algorithm (attribution
     above) reuses the A[] array itself for tree bookkeeping, so no extra
     memory is required. */
  int root, leaf, next, avbl, used, dpth;
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  /* Phase 1: build the tree; A[next].m_key temporarily holds merged node
     weights, and A[root].m_key is overwritten with the parent's index. */
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    /* Pick the smaller of the next internal node and the next leaf ... */
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = A[leaf++].m_key;
    /* ... then add the second smallest to form the merged node. */
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  /* Phase 2: convert parent indices into internal-node depths. */
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  /* Phase 3: expand internal-node depths into leaf code lengths, filling
     A[] from the back with the depth of each leaf. */
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Clamps a code-length histogram (pNum_codes[1..32], counts per length) so
// no code exceeds max_code_size, then rebalances until the Kraft sum is
// exactly 2^max_code_size (i.e. the code is complete again). Entries above
// max_code_size are folded down but intentionally not zeroed; callers only
// read lengths up to max_code_size afterwards.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int bits;
  unsigned long kraft_total = 0;
  if (code_list_len <= 1) return;
  // Fold every over-long code count into the max_code_size bucket.
  for (bits = max_code_size + 1; bits <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE;
       bits++)
    pNum_codes[max_code_size] += pNum_codes[bits];
  // Kraft sum scaled by 2^max_code_size.
  for (bits = max_code_size; bits > 0; bits--)
    kraft_total += (((unsigned long)pNum_codes[bits]) << (max_code_size - bits));
  // While over-subscribed: demote one max-length code and promote a pair
  // one level below the shortest available code, reducing the sum by one.
  while (kraft_total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (bits = max_code_size - 1; bits > 0; bits--)
      if (pNum_codes[bits]) {
        pNum_codes[bits]--;
        pNum_codes[bits + 1] += 2;
        break;
      }
    kraft_total--;
  }
}
/* Builds the canonical Huffman code for compressor table 'table_num'.
   - static_table != 0: code sizes are already present in
     m_huff_code_sizes[table_num]; only the codes are (re)generated.
   - static_table == 0: symbol frequencies in m_huff_count[table_num] are
     sorted, converted to optimal code lengths, clamped to code_size_limit,
     and written back as code sizes before code assignment.
   Codes are stored bit-reversed (DEFLATE transmits codes LSB-first). */
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    /* Histogram the pre-assigned code sizes. */
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    /* Collect only the symbols that actually occurred. */
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    /* m_key: frequency in, code length out. */
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    /* Assign clamped lengths back to symbols: pSyms is sorted by frequency
       ascending, so the least frequent symbols get the longest codes. */
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  /* Canonical code assignment: first code of each length follows from the
     counts of all shorter lengths. */
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    /* Bit-reverse the code for LSB-first emission. */
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends the low `l` bits of `b` to the compressor's bit buffer, flushing
// completed bytes to m_pOutput_buf. Bits are still consumed once the output
// buffer end is reached (the write is skipped); callers detect overflow by
// comparing m_pOutput_buf against m_pOutput_buf_end afterwards.
#define TDEFL_PUT_BITS(b, l)                                      \
  do {                                                            \
    mz_uint bits = b;                                             \
    mz_uint len = l;                                              \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                        \
    d->m_bit_buffer |= (bits << d->m_bits_in);                    \
    d->m_bits_in += len;                                          \
    while (d->m_bits_in >= 8) {                                   \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)                \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);        \
      d->m_bit_buffer >>= 8;                                      \
      d->m_bits_in -= 8;                                          \
    }                                                             \
  }                                                               \
  MZ_MACRO_END
// Flushes a pending run of `rle_repeat_count` repeats of prev_code_size:
// runs shorter than 3 are emitted as literal code sizes; longer runs use
// code 16 ("copy previous") plus a repeat count biased by -3. Updates the
// code-length-code histogram (m_huff_count[2]) as it goes.
#define TDEFL_RLE_PREV_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_repeat_count) {                                               \
      if (rle_repeat_count < 3) {                                         \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                 \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);       \
        while (rle_repeat_count--)                                        \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;    \
      } else {                                                            \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_repeat_count - 3);                             \
      }                                                                   \
      rle_repeat_count = 0;                                               \
    }                                                                     \
  }
// Flushes a pending run of `rle_z_count` zero code sizes: runs shorter than
// 3 are emitted literally, runs of 3..10 as code 17 (count - 3), and longer
// runs as code 18 (count - 11). Updates m_huff_count[2] accordingly.
#define TDEFL_RLE_ZERO_CODE_SIZE()                                          \
  {                                                                         \
    if (rle_z_count) {                                                      \
      if (rle_z_count < 3) {                                                \
        d->m_huff_count[2][0] =                                             \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);               \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                       \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 17;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_z_count - 3);                                    \
      } else {                                                              \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 18;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_z_count - 11);                                   \
      }                                                                     \
      rle_z_count = 0;                                                      \
    }                                                                       \
  }
// Transmission order of the code-length-code lengths (matches the fixed
// ordering in RFC 1951 section 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
/* Emits the header of a dynamic-Huffman DEFLATE block (block type 2):
   optimizes the literal/length and distance tables from the gathered
   frequencies, run-length encodes their code sizes with codes 16/17/18,
   builds and emits the code-length code (table 2), then transmits the
   packed code sizes. The BFINAL bit is written by tdefl_flush_block. */
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  /* Force the end-of-block symbol (256) to have a nonzero frequency so it
     always receives a code. */
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  /* Trim trailing zero-length codes (minimums: 257 lit codes, 1 dist). */
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  /* Concatenate both code-size arrays; they are RLE-packed as one stream. */
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  /* RLE pass: accumulate runs of zeros and runs of repeated sizes, flushing
     them at run limits (138 zeros, 6 repeats) or on a size change. */
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  /* Flush whichever run is still pending. */
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  /* Huffman-code the packed stream itself (max code length 7). */
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  /* Block type 2 (dynamic), then HLIT/HDIST counts. */
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  /* Trim trailing zero-length code-length-code entries (minimum 4). */
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  /* 3-bit code-length-code lengths, in swizzled transmission order. */
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  /* Finally, the packed code sizes themselves; codes 16/17/18 carry an
     extra count field of 2/3/7 bits respectively. */
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Emits the header of a static-Huffman DEFLATE block (block type 1) and
// loads the fixed code lengths defined by the spec, then builds the
// corresponding canonical codes.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint8 *pSizes = &d->m_huff_code_sizes[0][0];
  // Fixed literal/length code lengths: symbols 0-143 use 8 bits,
  // 144-255 use 9, 256-279 use 7, and 280-287 use 8.
  memset(pSizes, 8, 144);
  memset(pSizes + 144, 9, 256 - 144);
  memset(pSizes + 256, 7, 280 - 256);
  memset(pSizes + 280, 8, 288 - 280);
  // All 32 distance codes use 5 bits.
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // Block type 1 (static Huffman).
  TDEFL_PUT_BITS(1, 2);
}
// mz_bitmasks[n] has the low n bits set (n = 0..16); used with the
// s_tdefl_*_extra tables to mask a length/distance code's extra-bits field.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
/* Fast LZ-code emitter for little-endian targets with 64-bit registers and
   unaligned stores: bits accumulate in a 64-bit buffer and are flushed with
   one unaligned 8-byte store per LZ code instead of byte-at-a-time.
   Walks m_lz_code_buf, where each flag byte covers the following 8 codes:
   a set flag bit is a match record (1 length byte + little-endian 16-bit
   distance), a clear bit is a single literal byte.
   Returns MZ_FALSE when the output buffer fills up. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
/* Unchecked accumulate; correctness relies on the 64-bit flush at the
   bottom of each loop iteration keeping bits_in small. */
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }
  /* flags == 1 acts as a sentinel: reload the next flag byte (with 0x100
     as the new sentinel) once all 8 flag bits have been consumed. */
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      /* Match: emit length symbol + extra bits, then distance symbol +
         extra bits. */
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      /* Literal; up to two more literals are emitted in the same iteration
         when their flag bits are also clear, before the flush below. */
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    /* Flush: one unaligned 64-bit store, then advance by whole bytes and
       keep the 0-7 leftover bits in the accumulator. */
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  /* Hand the remaining buffered bits back to the checked slow-path macro,
     then emit the end-of-block symbol (256). */
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
/* Portable LZ-code emitter: same LZ code stream format as the 64-bit
   variant above (one flag byte per 8 codes; matches are a length byte plus
   a little-endian 16-bit distance, literals are single bytes), but the
   distance is assembled from two byte loads and all emission goes through
   the checked TDEFL_PUT_BITS macro, so no unaligned access or 64-bit
   registers are needed. Returns MZ_FALSE when the output buffer fills. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  /* flags == 1 is the sentinel meaning "reload the next flag byte". */
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      /* Match: length symbol + extra bits, then distance symbol + extra. */
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      /* Distances < 512 use the direct 512-entry table; larger distances
         are resolved from the distance's high byte. */
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      /* Single literal byte. */
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  /* End-of-block symbol. */
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Writes one complete DEFLATE block: emits the appropriate block header and
// Huffman tables (static or dynamic), then the buffered LZ codes.
// Returns MZ_FALSE if the output buffer overflowed.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (!static_block)
    tdefl_start_dynamic_block(d);
  else
    tdefl_start_static_block(d);
  return tdefl_compress_lz_codes(d);
}
/* Ends the current block: compresses the buffered LZ codes into the output
   buffer (falling back to a stored/raw block when forced, or when the
   compressed form would be no smaller than the raw data), optionally emits
   the zlib header/ADLER-32 trailer, resets per-block state, and delivers
   the produced bytes via the put-buf callback or the caller's buffer.
   Returns the number of output bytes still pending delivery (0 when fully
   flushed), or TDEFL_STATUS_PUT_BUF_FAILED if the callback rejected data. */
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  /* A raw block is only possible while every source byte of the block is
     still resident in the dictionary. */
  int n, use_raw_block =
             ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
             (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  /* Compress straight into the caller's buffer when it can hold a full
     TDEFL_OUT_BUF_SIZE; otherwise stage in the internal output buffer. */
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  /* Finalize the partially-filled flags byte; if no flag bits were used at
     all, drop the empty flags byte from the code buffer. */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    /* First block of a zlib stream: emit the CMF/FLG header bytes. */
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  /* BFINAL bit: set only on the terminating block. */
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  /* Snapshot the output state so an expanding block can be rolled back. */
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    /* Stored block: type 0, pad to a byte boundary, then LEN and its one's
       complement NLEN, then the raw bytes straight from the dictionary. */
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      /* Final flush: byte-align, then append the big-endian ADLER-32
         checksum when writing a zlib stream. */
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      /* Non-final flush: emit an empty stored block to force the stream to
         a byte boundary. */
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  /* Reset per-block state for the next block. */
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  /* Deliver whatever was produced. */
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      /* Staged internally: copy what fits into the user's buffer and
         remember the remainder for a later flush. */
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      /* Wrote directly into the user's buffer; just advance the offset. */
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
/* Searches the hash chains (d->m_next) for the longest dictionary match at
   lookahead_pos, at most max_match_len long and at most max_dist back.
   *pMatch_dist / *pMatch_len are updated only when a match strictly longer
   than the incoming *pMatch_len is found. This variant compares two bytes
   at a time via unaligned 16-bit loads (only compiled when
   MINIZ_USE_UNALIGNED_LOADS_AND_STORES is enabled). */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  /* Probe budget differs once a match of >= 32 bytes is already known
     (NOTE(review): presumably reduced -- confirm against m_max_probes
     initialization, which is outside this view). */
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  /* c01 = the two bytes ending the current best match: a candidate can only
     beat match_len if it matches these, so they are checked first. */
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
/* Follow one hash-chain link: give up when the chain ends or the candidate
   is farther back than max_dist; accept a candidate for full comparison
   when its tail bytes match c01. */
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    /* A (wrapped) distance of zero cannot be used as a match. */
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    /* Quick reject: first two bytes must also match. */
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    probe_len = 32;
    /* Compare four 16-bit words per iteration, up to 32 iterations. */
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      /* Ran the whole comparison window without a mismatch: take the
         longest allowed match and stop searching. */
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      /* Length = matched words * 2, plus one if the low byte of the
         mismatching word still matches. */
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      /* New best match: refresh the two-byte tail filter. */
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
/* Portable byte-at-a-time variant of tdefl_find_match for targets without
   unaligned load support.  Same contract as the unaligned version: finds the
   longest match at lookahead_pos along the hash chain, honoring the probe
   budget and max_dist, updating *pMatch_dist / *pMatch_len on improvement. */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  /* c0/c1: the two bytes just past/at the end of the current best match,
     used as a cheap reject filter inside TDEFL_PROBE. */
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if ((d->m_dict[probe_pos + match_len] == c0) && \
      (d->m_dict[probe_pos + match_len - 1] == c1)) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    /* Straight byte compare up to max_match_len. */
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      /* New best length: refresh the reject-filter bytes. */
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
/* Fast-path compressor: single-probe, greedy, level-1 style matching.
   Selected by tdefl_compress() only when max_probes == 1, greedy parsing is
   on, and no filtering/RLE/raw-block flags are set.  Returns MZ_FALSE on a
   failed flush, MZ_TRUE otherwise (including "output buffer full"). */
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    /* Copy fresh source bytes into the circular dictionary, mirroring the
       first TDEFL_MAX_MATCH_LEN-1 bytes past the end so matches can read
       past the wrap point without masking. */
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      /* Hash the next 3 bytes (one trigram) into the level-1 hash table. */
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        /* Extend the match two bytes at a time (up to 4 words/iteration). */
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        /* probe_len == 0 => the entire compare window matched. */
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          /* A 3-byte match at a large distance codes worse than a literal. */
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          /* Emit len byte + 16-bit distance, set the match flag bit. */
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        /* No trigram hit: emit a literal. */
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      /* LZ code buffer nearly full: spill state back to d and flush. */
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    /* Fewer than 4 bytes left in the lookahead: emit the tail as literals. */
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  /* Write the cached locals back into the compressor state. */
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  /* Append one literal byte to the pending LZ code stream. */
  d->m_huff_count[0][lit]++; /* bump symbol frequency for the next block */
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  /* Shift a 0 bit into the flags byte: 0 == literal, 1 == match. */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    /* Flags byte is full; reserve the next byte of the stream for flags. */
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
}
/* Appends a (length, distance) match to the LZ code stream: one biased length
   byte followed by a little-endian 16-bit distance, with a 1 shifted into the
   current flags byte.  Also bumps the literal/length and distance Huffman
   frequency tables consumed by the next block flush. */
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  /* Length is stored biased by TDEFL_MIN_MATCH_LEN so it fits in one byte. */
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  /* Shift a 1 bit into the flags byte: 1 == match, 0 == literal. */
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  /* Pick the distance symbol from the small or large lookup table depending
     on the distance's magnitude, and bump its frequency. */
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
/* General-purpose compression loop with hash-chain matching and optional lazy
   (one-byte deferred) parsing.  Consumes d->m_src_buf_left bytes from
   d->m_pSrc, emits LZ codes, and flushes blocks as the code buffer fills.
   Returns MZ_FALSE on a failed flush, MZ_TRUE otherwise. */
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      /* Rolling 2-byte hash seed; each new byte is folded in below. */
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        /* Mirror the first TDEFL_MAX_MATCH_LEN-1 bytes past the dictionary
           end so matches can read across the wrap point unmasked. */
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      /* Cold start: fewer than 2 bytes seen so far, add bytes one at a time
         and only begin hashing once 3 bytes are available. */
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      /* RLE mode: only consider a distance-1 run of the previous byte. */
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    /* Reject matches that code worse than literals: minimum-length matches at
       long distance, self-overlapping dist==pos hits, and (in filter mode)
       short matches. */
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      /* Lazy parsing: a match was deferred one byte; keep whichever of the
         saved and current match is longer. */
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      /* Defer this match one byte in case the next position matches longer. */
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
/* Copies pending internal output into the caller's buffer and reports how
   much input was consumed.  Returns TDEFL_STATUS_DONE only when compression
   has finished AND everything pending has been drained. */
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t avail = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    size_t n = MZ_MIN(avail, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && !d->m_output_flush_remaining) return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
/* Main streaming entry point.  Compresses up to *pIn_buf_size bytes into
   pOut_buf (or into the put-buf callback registered at init — exactly one of
   the two sinks must be configured).  On return *pIn_buf_size / *pOut_buf_size
   hold the bytes actually consumed / produced.  Returns TDEFL_STATUS_OKAY,
   TDEFL_STATUS_DONE (after TDEFL_FINISH completes), or an error status. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  /* Parameter validation: the put-buf callback and a caller output buffer
     are mutually exclusive; also reject calls after an error, input after a
     finish was requested, and non-NULL sizes with NULL buffers. */
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  /* Drain any output still pending from a previous call before compressing. */
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  /* Single-probe greedy configuration takes the specialized fast path. */
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  /* Keep the running adler-32 of the consumed input (zlib trailer). */
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  /* All input consumed and nothing pending: emit the final/flush block. */
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      /* Full flush resets the dictionary so the next block is independent. */
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  /* Convenience wrapper: push a whole buffer through the compressor; output
     is delivered via the put-buf callback registered at init time. */
  size_t remaining = in_buf_size;
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &remaining, NULL, NULL, flush);
}
/* Initializes (or re-initializes) a compressor.  pPut_buf_func may be NULL
   when the caller will supply output buffers to tdefl_compress() directly.
   flags: low 12 bits = max hash probes, plus the TDEFL_* option flags. */
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  /* Probe budgets for short (<32) and long (>=32) current match lengths. */
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  /* Deterministic output requires a zeroed hash table; the flag skips the
     clear as a speed optimization. */
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  /* Byte 0 of the code buffer is reserved for the first flags byte. */
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1; /* adler-32 seed value */
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  /* Status produced by the most recent tdefl_compress()-family call. */
  return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) {
  /* Adler-32 of all source data consumed so far (the zlib trailer value). */
  return d->m_adler32;
}
/* One-shot helper: compresses pBuf/buf_len and delivers the output through
   pPut_buf_func.  Returns MZ_TRUE on success. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool ok = MZ_FALSE;
  /* Reject a NULL source with a non-zero length, and a missing sink. */
  if ((buf_len && !pBuf) || !pPut_buf_func) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  if (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
      TDEFL_STATUS_OKAY)
    ok = (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return ok;
}
/* Growable (or fixed, caller-backed) byte sink fed through
   tdefl_output_buffer_putter() by the mem-to-heap / mem-to-mem helpers. */
typedef struct {
  size_t m_size, m_capacity; /* bytes used / bytes allocated */
  mz_uint8 *m_pBuf;          /* backing storage */
  mz_bool m_expandable;      /* MZ_TRUE => may be grown via MZ_REALLOC */
} tdefl_output_buffer;
/* tdefl put-buf callback: appends len bytes to the tdefl_output_buffer in
   pUser, doubling its capacity as needed when the buffer is expandable.
   Returns MZ_FALSE when the data does not fit and cannot be grown. */
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t needed = p->m_size + len;
  if (needed > p->m_capacity) {
    size_t cap = p->m_capacity;
    mz_uint8 *pGrown;
    if (!p->m_expandable) return MZ_FALSE;
    /* Double the capacity (starting at 128) until the new data fits. */
    do {
      cap = MZ_MAX(128U, cap << 1U);
    } while (needed > cap);
    pGrown = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, cap);
    if (!pGrown) return MZ_FALSE; /* original buffer remains valid */
    p->m_pBuf = pGrown;
    p->m_capacity = cap;
  }
  memcpy(p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = needed;
  return MZ_TRUE;
}
/* Compresses pSrc_buf into a freshly allocated heap buffer.  On success
   returns the buffer (caller frees with MZ_FREE) and stores its size in
   *pOut_len; returns NULL on failure. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len) return NULL;
  *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                    tdefl_output_buffer_putter, &out_buf,
                                    flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
/* Compresses pSrc_buf into the caller's pOut_buf.  Returns the compressed
   size, or 0 on failure (including "output buffer too small"). */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (pOut_buf == NULL) return 0;
  /* Non-expandable sink backed by the caller's storage. */
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  return tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                      tdefl_output_buffer_putter, &out_buf,
                                      flags)
             ? out_buf.m_size
             : 0;
}
#ifndef MINIZ_NO_ZLIB_APIS
/* Hash-chain probe counts indexed by compression level 0..10; deeper probing
   trades speed for ratio. */
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
/* Translates zlib-style (level, window_bits, strategy) parameters into a
   tdefl flags word for tdefl_init(). */
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  /* Probe count comes from the per-level table; levels <= 3 parse greedily. */
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (level == 0) {
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  } else {
    switch (strategy) {
      case MZ_FILTERED:
        comp_flags |= TDEFL_FILTER_MATCHES;
        break;
      case MZ_HUFFMAN_ONLY:
        comp_flags &= ~TDEFL_MAX_PROBES_MASK;
        break;
      case MZ_FIXED:
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
        break;
      case MZ_RLE:
        comp_flags |= TDEFL_RLE_MATCHES;
        break;
      default:
        break;
    }
  }
  return comp_flags;
}
#endif  // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
/* Encodes a raw w*h image (num_chans interleaved 8-bit channels per pixel,
   1/2/3/4 supported via the chans[] color-type table) as an in-memory PNG.
   Returns a heap buffer (caller frees with MZ_FREE) and its length in
   *pLen_out, or NULL on failure.  flip reverses the row order.  Layout:
   41-byte header (signature + IHDR + IDAT chunk header), zlib-compressed
   filtered scanlines, then a 16-byte footer (IDAT CRC + IEND). */
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
      0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z;
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp) return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header
  /* 41 placeholder bytes; the real header is written over them below. */
  for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data
  tdefl_init(
      pComp, tdefl_output_buffer_putter, &out_buf,
      s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    /* Each scanline is preceded by a PNG filter-type byte; z is 0 here
       (none filter) since the loop above left it at 0. */
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  /* *pLen_out temporarily holds the IDAT data length. */
  *pLen_out = out_buf.m_size - 41;
  {
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {0x89,
                           0x50,
                           0x4e,
                           0x47,
                           0x0d,
                           0x0a,
                           0x1a,
                           0x0a, /* PNG signature */
                           0x00,
                           0x00,
                           0x00,
                           0x0d, /* IHDR length = 13 */
                           0x49,
                           0x48,
                           0x44,
                           0x52, /* "IHDR" */
                           0,
                           0,
                           (mz_uint8)(w >> 8),
                           (mz_uint8)w, /* width, big-endian */
                           0,
                           0,
                           (mz_uint8)(h >> 8),
                           (mz_uint8)h, /* height, big-endian */
                           8,
                           chans[num_chans], /* bit depth, color type */
                           0,
                           0,
                           0, /* compression, filter, interlace */
                           0,
                           0,
                           0,
                           0, /* IHDR CRC, patched below */
                           (mz_uint8)(*pLen_out >> 24),
                           (mz_uint8)(*pLen_out >> 16),
                           (mz_uint8)(*pLen_out >> 8),
                           (mz_uint8)*pLen_out, /* IDAT length */
                           0x49,
                           0x44,
                           0x41,
                           0x54}; /* "IDAT" */
    /* CRC covers the IHDR type + 13 data bytes (offsets 12..28). */
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  /* IDAT CRC covers the chunk type ("IDAT", the 4 bytes before the data)
     plus the compressed payload. */
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's
// where #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
// ------------------- .ZIP archive reading
/* Bug fix: a stray `#error "No arvhive APIs"` sat directly under this
   #ifndef, which made every build that keeps the archive APIs ENABLED (the
   default, and what the code below implements) fail to compile.  The guard's
   purpose is to compile the archive code only when MINIZ_NO_ARCHIVE_APIS is
   undefined, so the directive is removed. */
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
/* Byte offsets into the raw ZIP record images; records are parsed field by
   field (never via structs) to sidestep alignment and endianness issues. */
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
/* Minimal untyped growable array; the element size is recorded at runtime
   via MZ_ZIP_ARRAY_SET_ELEMENT_SIZE and elements are accessed through
   MZ_ZIP_ARRAY_ELEMENT. */
typedef struct {
  void *m_p;                 /* backing storage */
  size_t m_size, m_capacity; /* element count used / allocated */
  mz_uint m_element_size;    /* bytes per element */
} mz_zip_array;
/* Private per-archive bookkeeping shared by the ZIP reader and writer. */
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;         /* raw central directory record bytes */
  mz_zip_array m_central_dir_offsets; /* per-entry offsets into m_central_dir */
  /* NOTE(review): presumably entry indices ordered for filename lookup —
     confirm against the code that fills it. */
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile; /* set when the archive is backed by a file */
  void *m_pMem;     /* set when the archive is backed by memory */
  size_t m_mem_size;
  size_t m_mem_capacity;
};
/* Records the element size of an untyped mz_zip_array. */
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
/* Typed lvalue access to element `index` of an mz_zip_array. */
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  /* Release the backing store and reset the array to an empty state. */
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(*pArray));
}
// Grows pArray's allocation to hold at least min_new_capacity elements.
// When "growing" is set, capacity is doubled geometrically to amortize
// repeated pushes. Returns MZ_FALSE on allocation failure (the array is
// left untouched in that case).
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) {
      // Bugfix: guard the doubling against size_t overflow, which would
      // wrap new_capacity to 0 and spin this loop forever.
      if (new_capacity > (((size_t)-1) / 2)) {
        new_capacity = min_new_capacity;
        break;
      }
      new_capacity *= 2;
    }
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}
// Ensures the array can hold at least new_capacity elements without
// changing its current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  // Already large enough - nothing to do.
  if (new_capacity <= pArray->m_capacity) return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
// Sets the array's logical size, growing the backing store first when
// required. Shrinking never releases memory.
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
                                                  mz_zip_array *pArray,
                                                  size_t new_size,
                                                  mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
// Reserves capacity for n additional elements beyond the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  size_t needed = pArray->m_size + n;
  return mz_zip_array_reserve(pZip, pArray, needed, MZ_TRUE);
}
// Appends n elements copied from pElements to the end of the array,
// growing geometrically as needed.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t old_size = pArray->m_size;
  mz_uint8 *pDst;
  if (!mz_zip_array_resize(pZip, pArray, old_size + n, MZ_TRUE))
    return MZ_FALSE;
  pDst = (mz_uint8 *)pArray->m_p + old_size * pArray->m_element_size;
  memcpy(pDst, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Converts MS-DOS packed time/date words to a time_t via mktime()
// (interpreted in the local time zone; DST is resolved by mktime).
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm t;
  memset(&t, 0, sizeof(t));
  t.tm_isdst = -1;  // let mktime() decide whether DST applies
  // Date word: bits 15-9 = years since 1980, 8-5 = month (1-12), 4-0 = day.
  t.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  t.tm_mon = ((dos_date >> 5) & 15) - 1;
  t.tm_mday = dos_date & 31;
  // Time word: bits 15-11 = hour, 10-5 = minute, 4-0 = seconds / 2.
  t.tm_hour = (dos_time >> 11) & 31;
  t.tm_min = (dos_time >> 5) & 63;
  t.tm_sec = (dos_time << 1) & 62;
  return mktime(&t);
}
// Converts a time_t into MS-DOS packed time and date words. On conversion
// failure both outputs are set to 0.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
  // Bugfix: localtime() returns NULL for unrepresentable times; the old
  // code dereferenced it unconditionally. Mirror the MSC error path.
  if (!tm) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#endif
  // Time word: hour in bits 15-11, minute in 10-5, seconds/2 in 4-0.
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  // Date word: years since 1980 in bits 15-9, month 1-12 in 8-5, day in 4-0.
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Reads pFilename's modification time via the platform stat() macro and
// converts it to DOS time/date words. Returns MZ_FALSE if the file cannot
// be stat'd. When built with MINIZ_NO_TIME, both outputs are zeroed.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif  // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Sets a file's access and modification timestamps via utime().
// Returns MZ_TRUE on success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  // utime() returns 0 on success, hence the negation.
  return !utime(pFilename, &t);
}
#endif  // #ifndef MINIZ_NO_TIME
#endif  // #ifndef MINIZ_NO_STDIO
// Common reader setup: validates the archive handle, installs default
// allocator callbacks where the caller supplied none, switches the archive
// into reading mode, and allocates + zeroes the internal state (central
// directory byte array and the two 32-bit offset/index arrays).
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  // Reject NULL archives, archives that already carry state, and archives
  // already opened in another mode.
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  // Central dir holds raw bytes; the offset arrays hold 32-bit values.
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Case-insensitive "less than" comparison of the filenames of two central
// directory records, identified by indices into the offsets array. This is
// the ordering predicate for the heap sort / binary search below.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The filename immediately follows the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Scan for the first mismatching (lowercased) byte.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Whole common prefix matched: the shorter name sorts first; otherwise
  // the mismatching characters decide.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}
// Swaps two mz_uint32 lvalues in place (classic do/while(0) multi-statement
// macro so it behaves like a single statement).
#define MZ_SWAP_UINT32(a, b) \
  do { \
    mz_uint32 t = a; \
    a = b; \
    b = t; \
  } \
  MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  int start = (size - 2) >> 1, end;
  // Phase 1: heapify. Sift down every non-leaf node, starting from the
  // parent of the last element.
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      // Choose the greater of the two children.
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2: repeatedly move the max element to the end and re-sift the
  // shrunken heap.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}
// Locates and parses the end-of-central-directory record, reads the entire
// central directory into memory, builds a per-file offset index, and
// sanity-checks every record. Multi-disk and zip64 archives are rejected.
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    // Scan this window backwards for the EOCD signature.
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up at the start of the file, or once we've scanned further back
    // than the maximum possible EOCD size (64K comment + fixed header).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back by one window, overlapping 3 bytes so a signature spanning
    // two windows is still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  // Reject spanned/multi-disk archives (disk numbers must be 0, or both 1).
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  // The central dir must be at least big enough for the claimed entry count.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // A stored (method 0) entry must have equal sizes; 0xFFFFFFFF sizes
      // signal zip64, which this reader does not support.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      // The local header plus compressed data must fit inside the archive.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}
// Initializes the archive for reading through the caller-supplied m_pRead
// callback. size is the total archive size in bytes.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  // A read callback is mandatory for this init path.
  if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Central dir parsing failed - tear down the partially built state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
// I/O callback for memory-backed archives: copies up to n bytes starting at
// file_ofs out of the in-memory archive image. Returns the number of bytes
// actually copied (0 for reads at or past the end).
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t s;
  // Bugfix: return early for out-of-range reads instead of computing an
  // out-of-bounds source pointer and passing it to memcpy with size 0.
  if (file_ofs >= pZip->m_archive_size) return 0;
  s = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  if (s) memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}
// Initializes the archive to read directly from a caller-owned memory
// block of the given size. The block is not copied and must outlive the
// archive; it is never written to.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  // Stash the caller's block; only const is dropped, the data is read-only.
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  // Parsing failed - release the internal state before reporting failure.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// I/O callback for file-backed archives: seeks to file_ofs (only when the
// stream isn't already positioned there) and reads n bytes. Returns the
// number of bytes actually read.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  // Reject offsets that don't fit in a signed 64-bit value; skip the seek
  // if the stream is already at the right place.
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Opens pFilename, measures its size, and initializes the archive to read
// from it through mz_zip_file_read_func. On failure before the state owns
// the handle, the file is closed here; afterwards mz_zip_reader_end()
// owns the cleanup.
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  // Determine the archive size by seeking to the end.
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  // From here the state owns pFile; mz_zip_reader_end() will close it.
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif  // #ifndef MINIZ_NO_STDIO
// Returns the number of files in the archive, or 0 for a NULL archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  if (!pZip) return 0;
  return pZip->m_total_files;
}
// Returns a pointer to the start of file_index's central directory record,
// or NULL if the archive, its state, the index, or the mode is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  // Translate the file index into a byte offset within the raw central dir.
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}
// Returns MZ_TRUE if the entry's general purpose bit flag marks it as
// encrypted (bit 0), MZ_FALSE for invalid indices.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pHeader) return MZ_FALSE;
  return (MZ_READ_LE16(pHeader + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
}
// Returns MZ_TRUE if the entry looks like a directory: its name ends with
// '/', or its external attributes carry the DOS directory bit (0x10).
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }
  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0) return MZ_TRUE;
  return MZ_FALSE;
}
// Fills *pStat with all the metadata of the entry at file_index (versions,
// flags, method, DOS time converted to time_t, CRC, sizes, attributes,
// local header offset, filename and comment). Returns MZ_FALSE for an
// invalid index or NULL pStat.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and the extra field in the record.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}
// Copies the entry's filename into pFilename (truncated to fit, always
// NUL-terminated when filename_buf_size > 0). Returns the copied length
// plus one for the terminator, or 0 for an invalid index.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  mz_uint name_len;
  const mz_uint8 *pHeader = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pHeader) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  name_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    // Clamp to the caller's buffer, leaving room for the terminator.
    name_len = MZ_MIN(name_len, filename_buf_size - 1);
    memcpy(pFilename, pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, name_len);
    pFilename[name_len] = '\0';
  }
  return name_len + 1;
}
// Compares two length-delimited strings for equality, case-sensitively
// when MZ_ZIP_FLAG_CASE_SENSITIVE is set, case-insensitively otherwise.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return memcmp(pA, pB, len) == 0;
  while (len--) {
    if (MZ_TOLOWER(*pA) != MZ_TOLOWER(*pB)) return MZ_FALSE;
    pA++;
    pB++;
  }
  return MZ_TRUE;
}
// Three-way, case-insensitive comparison between the filename of central
// dir record l_index and the caller-supplied name pR of length r_len.
// Returns <0, 0, or >0 like strcmp (used by the binary search below).
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // Filename starts right after the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Equal prefix: the length difference decides (both lengths fit in 16
  // bits, so the unsigned subtraction cast to int keeps the sign).
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
// Binary search over the filename-sorted index built by
// mz_zip_reader_sort_central_dir_offsets_by_filename(). Returns the file
// index of the matching entry, or -1 if not found. Only valid when the
// sorted index exists (i.e. sorting wasn't disabled via flags).
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  // Classic inclusive-bounds binary search on the sorted index array.
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp =
            mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
                                           file_index, pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}
// Finds the index of the entry named pName (optionally also matching
// pComment). Uses the sorted-index binary search when possible (default
// flags, no comment filter); otherwise scans the central dir linearly,
// honoring MZ_ZIP_FLAG_IGNORE_PATH and MZ_ZIP_FLAG_CASE_SENSITIVE.
// Returns -1 when not found or on invalid arguments.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: exact, case-insensitive, full-path lookup via the sorted index.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  // Zip name/comment lengths are 16-bit fields, so longer queries can't match.
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      // The comment sits after the filename and extra field in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip everything up to the last path separator ('/', '\\' or ':').
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
// Extracts the entry at file_index into the caller-supplied buffer pBuf
// (which must be large enough for the uncompressed size, or the compressed
// size when MZ_ZIP_FLAG_COMPRESSED_DATA is set). An optional user read
// buffer avoids a temporary heap allocation on the file-backed path.
// Verifies the CRC32 of the decompressed data. Only stored and deflated
// entries are supported; encrypted/patch entries are rejected.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining,
      out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf)) return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The compressed data follows the local header plus its variable-length
  // filename and extra fields.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size) return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Streaming inflate loop: refill the input window from the archive as it
  // drains, decompress straight into the caller's output buffer.
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Free the temp read buffer only if we allocated it here.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}
// Name-based wrapper: resolves pFilename to an index, then defers to the
// index-based extractor.
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (file_index < 0)
             ? MZ_FALSE
             : mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                                     buf_size, flags,
                                                     pUser_read_buf,
                                                     user_read_buf_size);
}
// Convenience wrapper: extract without a user-supplied read buffer (a
// temporary one is allocated internally when needed).
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags,
                                               NULL, 0);
}
// Convenience wrapper: extract by name without a user-supplied read buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags,
                                                    NULL, 0);
}
// Extracts the entry at file_index into a buffer allocated with the
// archive's allocator (caller frees via pZip->m_pFree). On success the
// extracted size is stored in *pSize (when pSize is non-NULL); returns
// NULL on any failure.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize) *pSize = 0;
  if (!p) return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  // Allocate for the raw compressed bytes when the caller wants them as-is.
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // On 32-bit size_t platforms, reject sizes that can't be represented.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize) *pSize = (size_t)alloc_size;
  return pBuf;
}
// Locates pFilename in the archive and extracts it to a heap buffer
// allocated with the archive's allocator. On success *pSize (if non-NULL)
// receives the extracted size; returns NULL on failure.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize) *pSize = 0;
    // Bugfix: this is a pointer-returning function; the old code returned
    // MZ_FALSE (an integer constant) here.
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
// Looks up pFilename in the archive (honoring any locate flags) and streams
// the entry's data to pCallback. Returns MZ_FALSE if the name cannot be
// found or extraction fails.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (index < 0) ? MZ_FALSE
                     : mz_zip_reader_extract_to_callback(pZip, index, pCallback,
                                                         pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
// Write-callback adapter for extracting to a stdio stream: the offset is
// ignored (the FILE* position already advances sequentially) and the buffer
// is forwarded to MZ_FWRITE. Returns the number of bytes actually written.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  MZ_FILE *pDst = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pDst);
}
// Extracts the entry at file_index to pDst_filename (created/truncated with
// "wb"). On success, and when time support is compiled in, the destination
// file's timestamps are set from the archive entry's stored time.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  mz_bool status;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pDst_filename, "wb"))) return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // A failing close means buffered data may never have reached the disk, so
  // the extraction is treated as failed regardless of status.
  if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO
// Tears down a reader-mode archive: frees the central directory arrays,
// closes any stdio stream the reader owns, releases the internal state, and
// marks the archive invalid. Returns MZ_FALSE if pZip is not a valid archive
// currently in reading mode.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // The guard above guarantees m_pState is non-NULL, so the original's second
  // "if (pZip->m_pState)" check was redundant and has been removed. Detach
  // the state pointer first so the archive never points at freed memory.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Convenience wrapper: finds pArchive_filename in the archive and extracts
// it to the disk file pDst_filename. Returns MZ_FALSE if the entry is not
// found or extraction fails.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  return (index < 0)
             ? MZ_FALSE
             : mz_zip_reader_extract_to_file(pZip, index, pDst_filename,
                                             flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores a 16-bit value at p in little-endian byte order, regardless of the
// host's endianness.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores a 32-bit value at p in little-endian byte order, regardless of the
// host's endianness.
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
  p[2] = (mz_uint8)((v >> 16) & 0xFF);
  p[3] = (mz_uint8)((v >> 24) & 0xFF);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Puts pZip into writing mode using the caller-supplied m_pWrite callback.
// existing_size is the number of bytes already present before the first
// entry (e.g. a reserved prefix). Fails if the archive is already in use,
// has no write callback, or if a user-specified file offset alignment is not
// a power of two. Missing allocator callbacks default to the built-ins.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  // Alignment padding is computed with bit masks, so it must be a power of 2.
  if ((pZip->m_file_offset_alignment) &&
      (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  pState = (mz_zip_internal_state *)pZip->m_pAlloc(
      pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state));
  if (!pState) return MZ_FALSE;
  memset(pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  pZip->m_pState = pState;
  return MZ_TRUE;
}
// Write callback backing heap-based archives: grows the in-memory block (via
// the realloc callback, doubling capacity) as needed and copies the data at
// file_ofs. Returns n on success, 0 on failure.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
// Reject zero-length writes, and on 32-bit size_t reject sizes that could
// overflow the allocator. The "(0, ...)" comma expression only silences
// MSVC's constant-conditional warning; it does not change the value.
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32))) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Geometric growth (min 64 bytes) keeps amortized cost linear.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size) new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Begins writing a new archive that lives entirely on the heap.
// size_to_reserve_at_beginning bytes are left in front of the first entry;
// initial_allocation_size is a hint for the first heap block (the larger of
// the two is pre-allocated, if non-zero).
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  size_t alloc_size;
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  alloc_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning);
  if (alloc_size) {
    void *pMem = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, alloc_size);
    if (!pMem) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_pMem = pMem;
    pZip->m_pState->m_mem_capacity = alloc_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Begins writing a new archive to the disk file pFilename (opened "wb").
// If size_to_reserve_at_beginning is non-zero, that many zero bytes are
// written up front and entries start after them (the caller may overwrite
// the reserved region later).
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  pFile = MZ_FOPEN(pFilename, "wb");
  if (!pFile) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    mz_uint64 write_ofs = 0;
    char zeros[4096];
    MZ_CLEAR_OBJ(zeros);
    while (size_to_reserve_at_beginning) {
      size_t chunk =
          (size_t)MZ_MIN(sizeof(zeros), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, write_ofs, zeros, chunk) !=
          chunk) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      write_ofs += chunk;
      size_to_reserve_at_beginning -= chunk;
    }
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an archive currently open for reading into writing mode so new
// entries can be appended. pFilename is only needed when the archive is
// being read via stdio: the stream is reopened "r+b" (pass the same path it
// was opened with). Writing resumes at the old central directory offset, so
// the previous central directory is discarded until the archive is
// re-finalized. Returns MZ_FALSE on any validation or reopen failure.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support
  // max size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // Fix: cast to void rather than the original bare "pFilename;" expression
    // statement, which draws "statement has no effect" warnings.
    (void)pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    if (!pFilename) return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif  // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Convenience wrapper around mz_zip_writer_add_mem_ex() for callers with no
// file comment and no precomputed uncompressed size/CRC.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size,
                                  /*pComment=*/NULL, /*comment_size=*/0,
                                  level_and_flags, /*uncomp_size=*/0,
                                  /*uncomp_crc32=*/0);
}
// Bookkeeping shared between the tdefl put-buf callback and the add_mem /
// add_file writers: where the next compressed byte lands in the archive and
// how many compressed bytes have been emitted so far.
typedef struct {
  mz_zip_archive *m_pZip;            // destination archive being written
  mz_uint64 m_cur_archive_file_ofs;  // offset of the next write
  mz_uint64 m_comp_size;             // running total of compressed bytes
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
// Fills pDst (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes) with a ZIP local file
// header for the given entry metadata. Always returns MZ_TRUE; the sizes
// are written as 32-bit fields (no zip64 here).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 when deflated, 0 for stored entries.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Fills pDst (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes) with a ZIP central
// directory header for the given entry metadata. Always returns MZ_TRUE;
// sizes and the local-header offset are 32-bit fields (no zip64 here).
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 when deflated, 0 for stored entries.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Appends a complete central directory record (header + filename + extra +
// comment) for a newly-written entry to the in-memory central directory, and
// records the record's starting offset in m_central_dir_offsets. On any
// push failure the central directory is rolled back to its original size.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive filename validity check: rejects names that start with a
// forward slash (absolute paths) or contain '\\' or ':' anywhere (DOS-style
// separators and drive letters are invalid in zip entry names).
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (*p == '/') return MZ_FALSE;
  for (; *p; p++)
    if ((*p == '\\') || (*p == ':')) return MZ_FALSE;
  return MZ_TRUE;
}
// Returns how many pad bytes must be written so the next local header starts
// on an m_file_offset_alignment boundary (0 when no alignment is requested).
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 ofs_mod;
  if (!pZip->m_file_offset_alignment) return 0;
  // Alignment is a power of two (validated at init), so masking is exact.
  ofs_mod =
      (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - ofs_mod) &
         (pZip->m_file_offset_alignment - 1);
}
// Writes n zero bytes to the archive starting at cur_file_ofs, in chunks of
// at most 4 KiB. Returns MZ_FALSE if any write comes up short.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char zero_buf[4096];
  memset(zero_buf, 0, MZ_MIN(sizeof(zero_buf), n));
  while (n > 0) {
    mz_uint32 chunk = MZ_MIN(sizeof(zero_buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, zero_buf, chunk) !=
        chunk)
      return MZ_FALSE;
    cur_file_ofs += chunk;
    n -= chunk;
  }
  return MZ_TRUE;
}
// Adds a memory buffer to the archive as a new entry. level_and_flags packs
// the compression level in the low 4 bits OR'd with MZ_ZIP_FLAG_* bits. When
// MZ_ZIP_FLAG_COMPRESSED_DATA is set, pBuf is treated as already-deflated
// data and the caller supplies uncomp_size/uncomp_crc32; otherwise both are
// computed here (and must be passed as 0). Returns MZ_FALSE on any
// validation, allocation, write, or compression failure.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  // A negative level selects the library default.
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // uncomp_size is an input only when pre-compressed data is being added.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  {
    // Entries are stamped with the current wall-clock time.
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif  // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size)) return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for alignment padding plus the local header; the header is
  // rewritten with real values after the data is known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Compute the CRC and size here; tiny buffers are stored uncompressed
    // since deflate overhead would only grow them.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Pre-compressed input was deflated by the caller; record that method.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
  } else if (buf_size) {
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    // -15 window bits = raw deflate (no zlib header), as ZIP requires.
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that the real sizes/CRC are known, go back and fill in the local
  // header that was zeroed earlier.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds the disk file pSrc_filename to the archive under pArchive_name,
// streaming it through an I/O buffer (stored or deflated depending on the
// level in level_and_flags). The entry is stamped with the source file's
// modification time. MZ_ZIP_FLAG_COMPRESSED_DATA is rejected here - this
// path always reads raw data. Returns MZ_FALSE on any failure; the source
// file is always closed before returning.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  // A negative level selects the library default.
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file) return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny files are stored; deflate overhead would only grow them.
  if (uncomp_size <= 3) level = 0;
  // Reserve space for alignment padding plus the local header; the header is
  // rewritten with real values once the compressed size and CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Stored: copy raw chunks through, accumulating the CRC as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate: feed chunks to tdefl, which writes compressed output via
      // mz_zip_writer_add_put_buf_callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      // -15 window bits = raw deflate (no zlib header), as ZIP requires.
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // TDEFL_FINISH on the last chunk flushes the final deflate block.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Rewrite the previously-zeroed local header with the real values.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies entry file_index from a reader-mode archive into this writer-mode
// archive without recompressing: the local header, filename/extra fields,
// raw compressed data, and (if present) the data descriptor are copied
// byte-for-byte, and the source's central directory record is reused with an
// updated local-header offset. Returns MZ_FALSE on any failure.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // mz_uint32 backing ensures the header buffer is 4-byte aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  // Read and validate the source entry's local header.
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // The filename and extra field immediately follow the local header; copy
  // them along with the compressed data in one streaming pass.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // The buffer must hold at least 16 bytes so a data descriptor fits later.
  if (NULL == (pBuf = pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1,
                   (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                                  MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                         comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  // Bit 3 of the general-purpose flags means a data descriptor follows the
  // compressed data; it may or may not begin with the 0x08074b50 signature.
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // 4 dwords when the optional signature is present, otherwise 3.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
  // Reuse the source's central directory record, patching in the new
  // local-header offset, then append it plus its variable-length tail.
  orig_central_dir_size = pState->m_central_dir.m_size;
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}
// Writes the in-memory central directory followed by the end-of-central-
// directory record, moving the archive into the FINALIZED state. After this
// call only mz_zip_writer_end()/mz_zip_writer_finalize_heap_archive() are
// valid on the archive. Returns MZ_FALSE on parameter/state errors, zip64
// overflow, or I/O failure.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet: entry count must fit in 16 bits, total size in 32.
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory (accumulated in memory during adds) at the
    // current end of the archive.
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  // Flush buffered stdio output before reporting success.
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}
// Finalizes a heap-backed archive and hands ownership of the output buffer
// to the caller via *pBuf/*pSize (caller frees it with the archive's
// allocator). Only valid when the writer was initialized on the heap.
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize) ||
      (pZip->m_pWrite != mz_zip_heap_write_func))
    return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
  mz_zip_internal_state *pState = pZip->m_pState;
  // Detach the heap block so mz_zip_writer_end() won't free it.
  *pBuf = pState->m_pMem;
  *pSize = pState->m_mem_size;
  pState->m_pMem = NULL;
  pState->m_mem_size = 0;
  pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Releases all writer resources: central directory arrays, heap output
// buffer (heap writers only), stdio FILE handle, and the internal state
// struct itself. Valid in WRITING or FINALIZED mode; afterwards the archive
// is MZ_ZIP_MODE_INVALID and must be re-initialized before reuse.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_bool status = MZ_TRUE;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
       (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // Detach state first so the archive cannot be used mid-teardown.
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif // #ifndef MINIZ_NO_STDIO
  // Heap-backed writers own m_pMem unless it was handed out by
  // mz_zip_writer_finalize_heap_archive() (which NULLs it).
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return status;
}
#ifndef MINIZ_NO_STDIO
// Adds a memory buffer as a new entry to the zip file at `pZip_filename`,
// creating the archive if it does not yet exist or appending otherwise.
// The archive is always finalized — even if the add failed — so an existing
// file is left with a valid central directory; a freshly created archive is
// deleted on failure. Returns MZ_TRUE on success.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  // Validate: a non-zero buffer/comment length requires a non-NULL pointer,
  // and the low nibble of the flags is the compression level.
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  // A failed stat() means the file doesn't exist yet.
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace miniz
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Stores a heap-allocated copy of `msg` in *err (no-op when err is NULL).
// The caller owns the returned string and must release it with free().
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (!err) {
    return;
  }
#ifdef _WIN32
  (*err) = _strdup(msg.c_str());
#else
  (*err) = strdup(msg.c_str());
#endif
}
// Size in bytes of the version block at the start of an EXR file.
static const int kEXRVersionSize = 8;
// Byte-wise copy of a 16-bit value (safe for unaligned source/destination).
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 2; i++) {
    d[i] = s[i];
  }
}
// In-place byte swap of a 16-bit value; compiled to a no-op on
// little-endian builds (EXR data is little-endian on disk).
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  const unsigned short v = *val;
  *val = static_cast<unsigned short>((v << 8) | (v >> 8));
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Byte-wise copy of a 32-bit int (safe for unaligned pointers).
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit unsigned int (safe for unaligned pointers).
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit float (safe for unaligned pointers; avoids
// any float load/store that could alter payload bits).
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// In-place byte swap of a 32-bit value; no-op on little-endian builds.
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  const unsigned int v = *val;
  *val = (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
         (v << 24);
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// In-place byte reversal of a 64-bit value; no-op on little-endian builds.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *d = reinterpret_cast<unsigned char *>(val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(&tmp);
  for (int i = 0; i < 8; i++) {
    d[i] = s[7 - i];
  }
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// Bit-level view of a 32-bit IEEE-754 float used by the half<->float
// converters below. Bit-field order depends on host endianness so the
// fields line up with the integer view `u`.
union FP32 {
  unsigned int u;  // raw bit pattern
  float f;         // floating-point view of the same bits
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// Bit-level view of a 16-bit IEEE-754 half-float (1 sign, 5 exponent,
// 10 mantissa bits), mirroring FP32 above.
union FP16 {
  unsigned short u;  // raw bit pattern
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE half (FP16) to a float (FP32) by bit manipulation,
// handling Inf/NaN and zero/denormal inputs. Derived from Fabian Giesen's
// public-domain reference (https://gist.github.com/rygorous/2156668).
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust (rebias 15 -> 127)
  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize via float subtraction
  }
  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Converts a float (FP32) to an IEEE half (FP16), with explicit handling of
// zero/denormal, Inf/NaN, overflow and underflow, and round-to-nearest.
// Based on ISPC reference code (with minor modifications).
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string from `ptr`, scanning at most `len` bytes.
// On success stores the string (without the terminator) in *s and returns
// a pointer just past the '\0'. When no terminator is found within `len`
// bytes, clears *s and returns NULL.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  size_t n = 0;
  while ((n < len) && (ptr[n] != 0)) {
    n++;
  }
  if (n >= len) {
    // Unterminated input: signal failure with an empty result.
    (*s) = std::string();
    return NULL;
  }
  (*s) = std::string(ptr, ptr + n);
  return ptr + n + 1;  // skip '\0'
}
// Parses one EXR header attribute from `marker` (at most `size` bytes):
//   name\0 type\0 <uint32 LE payload-length> <payload bytes>
// On success fills name/type/data, stores the total bytes consumed in
// *marker_size, and returns true. A zero-length payload is accepted only
// for "string" attributes (stored as a single '\0' byte).
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);
  marker += name_len + 1;
  size -= name_len + 1;
  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    // Type field is not NUL-terminated within the remaining bytes.
    return false;
  }
  *type = std::string(marker, type_len);
  marker += type_len + 1;
  size -= type_len + 1;
  if (size < sizeof(uint32_t)) {
    // Not enough room left for the length field.
    return false;
  }
  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  // Length is little-endian on disk; swap on big-endian hosts.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    } else {
      return false;
    }
  }
  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);
  if (size < data_len) {
    // Declared payload is larger than the remaining bytes.
    return false;
  }
  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Appends one serialized EXR attribute (name\0 type\0 length data) to
// `out`; the 32-bit length field is byte-swapped to little-endian on
// big-endian hosts before being written.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  const unsigned char *name_bytes =
      reinterpret_cast<const unsigned char *>(name);
  const unsigned char *type_bytes =
      reinterpret_cast<const unsigned char *>(type);
  out->insert(out->end(), name_bytes, name_bytes + strlen(name) + 1);
  out->insert(out->end(), type_bytes, type_bytes + strlen(type) + 1);
  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  const unsigned char *len_bytes = reinterpret_cast<unsigned char *>(&outLen);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));
  out->insert(out->end(), data, data + len);
}
// Per-channel metadata matching the on-disk "channels" attribute record:
// name\0, pixel type, pLinear byte + 3 reserved bytes, x/y sampling.
typedef struct {
  std::string name;        // less than 255 bytes long
  int pixel_type;          // storage type id as read from the channel record
  int x_sampling;          // x sampling value from the record (see EXR spec)
  int y_sampling;          // y sampling value from the record (see EXR spec)
  unsigned char p_linear;  // pLinear byte from the record
  unsigned char pad[3];    // keeps the struct 4-byte aligned
} ChannelInfo;
// Aggregated EXR header state for one image: channel list, generic
// attributes, data/display windows, tiling parameters and compression.
typedef struct {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;
  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;
  int chunk_count;
  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  unsigned int header_len;  // total header size in bytes, filled by the parser
  int compression_type;
  // Resets every field to a zero/empty default.
  void clear() {
    channels.clear();
    attributes.clear();
    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;
    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;
    header_len = 0;
    compression_type = 0;
  }
} HeaderInfo;
// Parses the payload of the "channels" attribute into `channels`. The list
// is a sequence of records — name\0, pixel_type:int, pLinear:uchar,
// reserved uchar[3], x_sampling:int, y_sampling:int — terminated by a
// single 0 byte. Returns false on truncated or overrun data.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    if ((*p) == 0) {
      // Empty name byte == end-of-list terminator.
      break;
    }
    ChannelInfo info;
    // Remaining bytes from the cursor to the end of the attribute payload.
    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }
    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }
    // 16 bytes of fixed-size fields follow the name; they (plus the list
    // terminator) must still fit inside `data`.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }
    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;
    // On-disk fields are little-endian; swap on big-endian hosts.
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
    channels.push_back(info);
  }
  return true;
}
// Serializes `channels` into the on-disk "channels" attribute layout: one
// (name\0, pixel_type, pLinear + 3 reserved bytes, x/y sampling) record
// per channel, followed by a single terminating '\0'.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  // +1 for the list terminator. resize() value-initializes the vector, so
  // the three reserved bytes skipped below remain zero.
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;
    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    // Swap to little-endian on big-endian hosts before writing.
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    p += 4;  // 1 byte pLinear + 3 reserved (zero) bytes
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';
}
// Deflate-compresses an EXR pixel block into `dst` (which must be at least
// compressBound(src_size) bytes). Before compression the data is run
// through OpenEXR's two reversible transforms — split-interleave of bytes
// and a delta predictor — to improve the compression ratio. If compression
// would expand the data, the raw source bytes are stored instead and
// compressedSize == src_size (Issue 40).
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data: even bytes go to the first half of tmpBuf,
  // odd bytes to the second half.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: store each byte as a biased delta from its predecessor.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  compressedSize = outSize;
#endif
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inflates an EXR block produced by CompressZip() and undoes the two
// OpenEXR transforms (delta predictor, then byte de-interleave). When the
// stored size equals the uncompressed size, the data was stored raw
// (Issue 40) and is copied through unchanged. Returns false on inflate
// failure.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: rebuild each byte from its predecessor plus stored delta.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave the two halves into `dst`.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Runs shorter than MIN_RUN_LENGTH are stored literally; a run byte can
// encode at most MAX_RUN_LENGTH repetitions (signed-char range).
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
// Run-length encodes `inLength` bytes from `in` into `out` (OpenEXR RLE):
// a repeat run is stored as (count-1 >= 0, byte); a literal run as a
// negative count followed by the raw bytes. Returns the compressed length.
// `out` must be able to hold the worst case of 3/2 * inLength bytes.
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;
  while (runStart < inEnd) {
    // Extend the run of identical bytes as far as the limit allows.
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }
    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run: emit (count - 1, repeated byte).
      //
      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressible run: scan ahead until a 3-byte repeat begins (or the
      // literal limit is hit), then emit a negative count plus raw bytes.
      //
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }
      *outWrite++ = static_cast<char>(runStart - runEnd);
      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }
    ++runEnd;
  }
  return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
// Decodes an OpenEXR RLE stream produced by rleCompress() into `out`.
// Returns the number of bytes written, or 0 when the output would exceed
// maxLength or the input is malformed (bounds check, issue #116).
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;
  while (inLength > 0) {
    const int code = *in++;
    if (code < 0) {
      // Literal run: -code raw bytes follow the count byte.
      const int count = -code;
      inLength -= count + 1;
      maxLength -= count;
      if (maxLength < 0 || inLength < 0) return 0;
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeat run: the next byte is replicated (code + 1) times.
      const int count = code;
      inLength -= 2;
      maxLength -= count + 1;
      if (maxLength < 0) return 0;
      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;
      in++;
    }
  }
  return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// RLE-compresses an EXR pixel block into `dst` after applying OpenEXR's two
// reversible transforms (byte split-interleave + delta predictor), exactly
// as CompressZip() does. `dst` must hold the worst case of 3/2 * src_size
// bytes. If the encoded data is not smaller, the raw source is stored and
// compressedSize == src_size (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  //
  // Reorder the pixel data: even bytes to the first half, odd to the second.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: store each byte as a biased delta from its predecessor.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);
  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Decodes an EXR block produced by CompressRle() and undoes the OpenEXR
// transforms (delta predictor, then byte de-interleave). When the stored
// size equals the uncompressed size, the data was stored raw (Issue 40).
// Returns false on malformed input or a size mismatch after decoding.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }
  std::vector<unsigned char> tmpBuf(uncompressed_size);
  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    // Decoded length must match exactly; anything else means corruption.
    return false;
  }
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  // Predictor: rebuild each byte from its predecessor plus stored delta.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave the two halves into `dst`.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel bookkeeping for the PIZ codec (derived from OpenEXR's
// ImfPizCompressor). [start, end) delimits the channel's samples inside a
// shared unsigned-short buffer. Field semantics follow the OpenEXR source;
// usage is outside this view — confirm against ImfPizCompressor.cpp.
struct PIZChannelData {
  unsigned short *start;  // current read/write cursor
  unsigned short *end;    // one past the channel's last sample
  int nx;                 // x extent (presumably samples per scanline)
  int ny;                 // y extent
  int ys;                 // y sampling factor
  int size;               // per-pixel size in shorts (per OpenEXR PIZ code)
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// Forward 14-bit Haar wavelet step (no modulo arithmetic): l receives the
// truncated average of (a, b) and h their difference, both computed in
// signed 16-bit arithmetic. Only valid for values below (1 << 14).
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short as = static_cast<short>(a);
  const short bs = static_cast<short>(b);
  l = static_cast<unsigned short>(static_cast<short>((as + bs) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(as - bs));
}
// Inverse 14-bit Haar wavelet step: reconstructs (a, b) from the average
// `l` and difference `h` written by wenc14(), using signed arithmetic so
// the truncated average is undone exactly.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = static_cast<short>(l);
  const int d = static_cast<short>(h);
  const int ai = m + (d & 1) + (d >> 1);
  a = static_cast<unsigned short>(static_cast<short>(ai));
  b = static_cast<unsigned short>(static_cast<short>(ai - d));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the 16-bit modulo-arithmetic wavelet variant below.
const int NBITS = 16;                   // coefficient width in bits
const int A_OFFSET = 1 << (NBITS - 1);  // bias applied to `a` before encoding
const int M_OFFSET = 1 << (NBITS - 1);  // bias for negative differences
const int MOD_MASK = (1 << NBITS) - 1;  // modulo-2^16 mask
// Forward Haar step with modulo-2^16 arithmetic: works for full 16-bit
// data (unlike wenc14) at the cost of slightly worse Huffman compression.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  const int diff = ao - b;
  int m = (ao + b) >> 1;
  if (diff < 0) {
    m = (m + M_OFFSET) & MOD_MASK;
  }
  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(diff & MOD_MASK);
}
// Inverse of wenc16(): reconstructs (a, b) from the modulo-2^16 average
// and difference, undoing the A_OFFSET bias.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int bb = (l - (h >> 1)) & MOD_MASK;
  const int aa = (h + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
// In-place 2D Haar wavelet transform over a strided buffer, used by the
// PIZ compressor. Chooses the 14-bit basis when all values fit below
// (1 << 14), otherwise the modulo-16-bit basis.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        // 2x2 block corners at the current level's stride.
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
    unsigned short *in, // io: values are transformed in place
    int nx,             // i : x size
    int ox,             // i : x offset (stride between adjacent x samples)
    int ny,             // i : y size
    int oy,             // i : y offset (stride between adjacent y samples)
    unsigned short mx)  // i : maximum in[x][y] value
{
  // Inverse of wav2Encode(): undoes the hierarchical transform from the
  // coarsest level down to level 0.  `mx` must match the value used at
  // encode time so the same basis (14-bit or modulo-16-bit) is selected.
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        // Corners of the current 2x2 block at this level.
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding: inverse column transforms, then rows
        // (the mirror of the encode order).
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size (+1 slot for the RLE pseudo-symbol)
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;         // mask extracting HUF_DECBITS bits
// One entry of the primary decoding hash table.  A short code
// (length <= HUF_DECBITS) resolves directly: `len` is the code length and
// `lit` the decoded symbol.  For long codes, len == 0 and `lit` counts the
// candidate symbols stored in the secondary array `p` (allocated by
// hufBuildDecTable(), freed by hufFreeDecTable()).
struct HufDec { // short code long code
  //-------------------------------
  int len : 8;  // code length 0
  int lit : 24; // lit p size
  int *p;       // 0 lits
};
// An encoding-table entry packs the canonical code value in bits [63:6]
// and the code's bit length in bits [5:0]; these helpers unpack the fields.
inline long long hufLength(long long code) { return code & 0x3F; }
inline long long hufCode(long long code) { return code >> 6; }
// Append the low `nBits` of `bits` to the output stream.  `c` is a bit
// accumulator holding `lc` not-yet-flushed bits; whole bytes are flushed
// to `out` as soon as they become available.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;
  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Extract the next `nBits` bits from the input stream.  Bytes are pulled
// from `in` into the accumulator `c` until it holds at least `nBits`
// valid bits (`lc` tracks how many), then the requested bits are returned.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    const unsigned char byte = *reinterpret_cast<const unsigned char *>(in++);
    c = (c << 8) | byte;
    lc += 8;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  // n[i] first counts codes of length i, then (after the second loop)
  // holds the next code value to assign for length i.  n[0] collects the
  // unused symbols; it is never read back, which is harmless.
  long long n[59];
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);
    // Pack as [code << 6 | length]; see hufLength()/hufCode().
    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Heap ordering predicate: used with std::make_heap/pop_heap to keep the
// SMALLEST pointed-to frequency on top (a min-heap over pointers).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *b < *a; }
};
static void hufBuildEncTable(
    long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
    int *im,        // o: min frq index
    int *iM)        // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);
  *im = 0;
  // NOTE: assumes at least one non-zero frequency; hufCompress() only
  // calls this when nRaw > 0, which guarantees it.
  while (!frq[*im]) (*im)++;
  int nf = 0;
  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;
    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }
  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;
  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    // (The pointer difference fHeap[0] - frq is the symbol index.)
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;
    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit.  (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //
    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }
    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) break;
    }
  }
  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;  // first packed value encoding a short zero run (2 zeroes)
const int LONG_ZEROCODE_RUN = 63;   // escape: long zero run, 8-bit count follows
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;  // == 6
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;  // == 261, longest run one escape can express
static void hufPackEncTable(
    const long long *hcode, // i : encoding table [HUF_ENCSIZE]
    int im,                 // i : min hcode index
    int iM,                 // i : max hcode index
    char **pcode)           // o: ptr to packed table (updated)
{
  // Serialize the code lengths for symbols im..iM as 6-bit values,
  // compressing runs of zero lengths per the table documented above
  // (SHORT_ZEROCODE_RUN / LONG_ZEROCODE_RUN).
  char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);
    if (l == 0) {
      // Measure the length of the zero run starting at im.
      int zerun = 1;
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }
      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // Long run: escape code plus an 8-bit (run - 6) count.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          // Short run (2..5 zeroes): encoded directly in one 6-bit value.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
      // A single zero falls through and is written literally below.
    }
    outputBits(6, l, c, lc, p);
  }
  // Flush the final partial byte, padded with zero bits.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
    const char **pcode, // io: ptr to packed table (updated)
    int ni,             // i : input size (in bytes)
    int im,             // i : min hcode index
    int iM,             // i : max hcode index
    long long *hcode)   // o: encoding table [HUF_ENCSIZE]
{
  // Read back the packed code-length table written by hufPackEncTable(),
  // expanding zero-run escapes, then rebuild the canonical codes from the
  // lengths.  Returns false when the packed data would run past `ni`
  // bytes or a zero run would overrun the [im, iM] range.
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    if (p - *pcode >= ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p); // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Escape: an 8-bit run-length extension follows.
      // NOTE(review): this bound uses `>` while the loop check above uses
      // `>=`, so getBits() may still consume one byte at offset ni here —
      // confirm whether callers always provide a byte of slack.
      if (p - *pcode > ni) {
        return false;
      }
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: length encoded directly in the 6-bit value.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  // Convert the recovered lengths into canonical (code, length) pairs.
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
                                             // decoding table [HUF_DECSIZE]
{
  // Field-wise reset (rather than a raw memset) so the bit-field layout
  // of HufDec never matters.
  for (int i = 0; i < HUF_DECSIZE; i++) {
    HufDec &entry = hdecod[i];
    entry.len = 0;
    entry.lit = 0;
    entry.p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
                             int im,                 // i : min index in hcode
                             int iM,                 // i : max index in hcode
                             HufDec *hdecod) // o: (allocated by caller)
//	decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry, indexed by the code's top
      // HUF_DECBITS bits.
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      pl->lit++;
      if (pl->p) {
        // Grow the candidate list by one.  The linear copy is fine
        // because long codes are rare.
        int *p = pl->p;
        pl->p = new int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries.  Every table slot whose
      // top l bits equal the code resolves to this symbol.
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  // Release the secondary (long-code) candidate arrays; primary entries
  // own no memory.
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p != NULL) {
      delete[] hdecod[i].p;
      hdecod[i].p = NULL;
    }
  }
}
//
// ENCODING
//
// Emit one Huffman code: unpack the (length, value) fields from the
// packed table entry and append them to the output bit stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  const int nBits = static_cast<int>(hufLength(code));
  outputBits(nBits, hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of (runCount + 1) instances of the symbol sCode.
  // Either emit the symbols explicitly, or, if that is shorter, emit
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // `>= 0` is intentional: the symbol is emitted runCount + 1 times
    // (the symbol itself plus runCount repetitions).
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode     // return: output size (in bits)
    (const long long *hcode,   // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni,             // i : input buffer size, in values
                               //     (hufCompress passes the element count)
     int rlc,                  // i : rl code
     char *out)                // o: compressed output buffer
{
  // Huffman-encode `ni` input values, run-length-collapsing repeats of
  // the same value via the rlc pseudo-symbol (see sendCode()).
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0;      // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0;
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++; // extend the current run (repeat count is capped at 255)
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush the final partial byte, padded with zero bits.
  if (lc) *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// getChar(): pull one byte from the input stream into the low end of the
// bit accumulator `c` and account for its 8 bits in `lc`.  `c` must be a
// long long so previously accumulated bits are not shifted out.
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
// Emit the decoded symbol `po` into the output buffer.  If `po` is the
// run-length code `rlc`, read an 8-bit repeat count from the bit stream
// and replicate the previously emitted value that many times.  Returns
// false on input underrun or output overrun.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      // NOTE(review): this rejects the read when fewer than 2 bytes
      // remain although getChar() consumes only one — conservative
      // bound; confirm against the original issue.
      if ((in + 1) >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }
    lc -= 8;
    unsigned char cs = (c >> lc); // repeat count
    if (out + cs > oe) return false;
    // Bounds check for safety: the run replicates out[-1], which does
    // not exist if nothing has been emitted yet.
    // Issue 100.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];
    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,   // i : decoding table
                      const char *in,         // i : compressed input buffer
                      int ni,                 // i : input size (in bits)
                      int rlc,                // i : run-length code
                      int no,                 // i : expected output size
                                              //     (in values)
                      unsigned short *out)    // o: uncompressed output buffer
{
  // Decode the bit stream using the primary table for short codes and a
  // linear search of the secondary lists for long codes.  Returns false
  // on any malformed code or if the output count does not match `no`.
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;         // begin
  unsigned short *oe = out + no;      // end
  const char *ie = in + (ni + 7) / 8; // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code
        //
        // Search long code: try each candidate symbol stored for this
        // primary-table slot until its full code matches the stream.
        //
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie) // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes.  `(8 - ni) & 7` is the number of
  // zero-padding bits in the final byte; discard them first.
  //
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }
  // All input consumed: the decoded value count must match exactly.
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();
  return true;
}
// Build a histogram: freq[s] = number of occurrences of symbol s in data.
// All entries of `freq` are reset first.
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  std::fill(freq.begin(), freq.end(), 0LL);
  for (int i = 0; i < n; ++i) {
    freq[data[i]]++;
  }
}
// Serialize a 32-bit value into 4 bytes, little-endian, independent of
// the host byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = reinterpret_cast<unsigned char *>(buf);
  b[0] = static_cast<unsigned char>(i & 0xffu);
  b[1] = static_cast<unsigned char>((i >> 8) & 0xffu);
  b[2] = static_cast<unsigned char>((i >> 16) & 0xffu);
  b[3] = static_cast<unsigned char>((i >> 24) & 0xffu);
}
// Deserialize 4 little-endian bytes into a 32-bit value, independent of
// the host byte order.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = reinterpret_cast<const unsigned char *>(buf);
  // Promote to unsigned int before shifting: the original `b[3] << 24`
  // shifts an int-promoted byte into the sign bit when b[3] >= 0x80,
  // which is undefined behavior.
  return static_cast<unsigned int>(b[0]) |
         (static_cast<unsigned int>(b[1]) << 8) |
         (static_cast<unsigned int>(b[2]) << 16) |
         (static_cast<unsigned int>(b[3]) << 24);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  // Huffman-compress nRaw 16-bit values into `compressed`; returns the
  // number of bytes written.  Output layout: 20-byte header (im, iM,
  // tableLength, nBits, reserved — 4 bytes each), packed code-length
  // table, then the bit stream.  The caller provides a sufficiently
  // large output buffer.
  if (nRaw == 0) return 0;
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  // iM is the run-length pseudo-symbol appended by hufBuildEncTable().
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0); // room for future extensions
  return dataStart + data_length - compressed;
}
// Decode a hufCompress()ed block into raw->size() output values.
// Input layout: 20-byte header (im, iM, tableLength, nBits, reserved),
// packed code-length table, then the bit stream.  Returns false on any
// malformed input.
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // Zero input bytes can only represent zero output values; treat the
    // empty/empty combination as success (the old code returned false).
    return raw->size() == 0;
  }
  // Reject buffers too small to hold the 20-byte header BEFORE reading
  // it (the old code read 16 header bytes unconditionally).
  if (nCompressed < 20) {
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  const char *ptr = compressed + 20;
  std::vector<long long> freq(HUF_ENCSIZE);
  std::vector<HufDec> hdec(HUF_DECSIZE);
  hufClearDecTable(&hdec.at(0));
  // Propagate failures from every stage; the old code ignored them and
  // continued with a partially-built table.
  bool ok = hufUnpackEncTable(
      &ptr, nCompressed - static_cast<int>(ptr - compressed), im, iM,
      &freq.at(0));
  if (ok && (nBits > 8 * (nCompressed - static_cast<int>(ptr - compressed)))) {
    // The stream claims more bits than the remaining bytes can hold.
    ok = false;
  }
  if (ok) {
    ok = hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
  }
  if (ok) {
    ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                   static_cast<int>(raw->size()), raw->data());
  }
  // Always release the secondary decoding-table allocations, even on
  // the failure paths.
  hufFreeDecTable(&hdec.at(0));
  return ok;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
// Build a presence bitmap of the 16-bit values occurring in `data`, and
// report the first/last non-zero bitmap byte.  The value zero is never
// recorded: the decoder assumes zeroes are always present.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  memset(bitmap, 0, BITMAP_SIZE);
  for (int i = 0; i < nData; ++i) {
    const unsigned short v = data[i];
    bitmap[v >> 3] |= static_cast<unsigned char>(1u << (v & 7));
  }
  bitmap[0] &= ~1; // zero is implicit, never stored explicitly
  // Locate the non-zero span of the bitmap.
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i] != 0) {
      if (minNonZero > i) minNonZero = static_cast<unsigned short>(i);
      if (maxNonZero < i) maxNonZero = static_cast<unsigned short>(i);
    }
  }
}
// Build a forward lookup table mapping each value present in the bitmap
// (plus the implicit zero) to a compact index 0..k-1; absent values map
// to 0.  Returns the largest index assigned, i.e. the number of ones in
// the bitmap minus 1 (zero counted as present).
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int next = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    const bool present = (i == 0) || ((bitmap[i >> 3] & (1 << (i & 7))) != 0);
    lut[i] = present ? static_cast<unsigned short>(next++) : 0;
  }
  return static_cast<unsigned short>(next - 1); // maximum value stored in lut[]
}
// Inverse of forwardLutFromBitmap(): lut[k] is the k-th value present in
// the bitmap (zero always counts as present); unused slots are cleared.
// Returns the largest k holding a meaningful entry.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int count = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) {
      lut[count++] = static_cast<unsigned short>(i);
    }
  }
  const int maxIndex = count - 1;
  while (count < USHORT_RANGE) {
    lut[count++] = 0;
  }
  return static_cast<unsigned short>(maxIndex);
}
// Remap every element of `data` in place through the lookup table.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int i = nData; i-- > 0;) {
    data[i] = lut[data[i]];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  // PIZ-compress a block of scanlines: build a value-presence bitmap,
  // remap samples through a forward LUT, wavelet-transform each channel
  // plane, then Huffman-encode the result.  Output layout: minNonZero
  // and maxNonZero (2 bytes each), the non-zero bitmap slice, a 4-byte
  // Huffman-data length, then the Huffman data.  The caller must size
  // `outPtr` generously; *outSize receives the bytes actually used.
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Lay out one contiguous plane per channel inside tmpBuffer.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];
    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;
    // Channel sample size in 16-bit units: 1 for HALF, 2 for UINT/FLOAT.
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    cd.size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }
  // Deinterleave the scanline-ordered input into the per-channel planes.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    // Only the non-zero slice of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);
  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));
  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  // Inverse of CompressPiz(): read the bitmap header, Huffman-decode,
  // wavelet-decode each channel plane, remap through the reverse LUT and
  // re-interleave into scanline order.
  // NOTE(review): `tmpBufSize` is used both as a byte count (inLen
  // comparison below) and as an unsigned-short element count
  // (tmpBuffer size, applyLut) — confirm the intended unit at the caller.
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  memset(bitmap.data(), 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    // Restore the stored slice of the presence bitmap.
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  //
  // Huffman decoding
  //
  int length;
  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  // NOTE(review): the hufUncompress() return value is ignored; on
  // malformed input tmpBuffer may be partially filled — consider
  // propagating the failure.
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);
  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];
    // Channel sample size in 16-bit units: 1 for HALF, 2 for UINT/FLOAT.
    size_t pixelSize = sizeof(int); // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
    // NOTE(review): no check that the accumulated plane sizes stay within
    // tmpBuffer — inconsistent header dimensions could overrun it.
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
  // Re-interleave the per-channel planes back into scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
// Parameters controlling ZFP compression; which field is meaningful
// depends on `type`.
struct ZFPCompressionParam {
  double rate;       // TINYEXR_ZFP_COMPRESSIONTYPE_RATE
  int precision;     // TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION
  double tolerance;  // TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY
  int type;          // TINYEXR_ZFP_COMPRESSIONTYPE_*
  // Default: fixed-rate mode at 2 bits per value.
  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
// Scan the custom attribute list for ZFP compression settings.  The
// `zfpCompressionType` attribute selects the mode; the matching
// rate/precision/tolerance attribute supplies its parameter.  Returns
// true only when both attributes are present and well-formed.
bool FindZFPCompressionParam(ZFPCompressionParam *param,
                             const EXRAttribute *attributes,
                             int num_attributes) {
  bool foundType = false;
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }
  if (!foundType) {
    return false;
  }
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Bug fix: the precision value was previously stored into
        // param->rate, leaving param->precision at its default.
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }
  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size = dst_width * dst_num_lines * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = zfp_stream_compressed_size(zfp);
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
// Decode one chunk (scanline block or tile) of EXR pixel data into the
// per-channel output images.
//
// out_images           : array of per-channel destination buffers; element c
//                        is interpreted as half/float/uint per
//                        requested_pixel_types[c].
// requested_pixel_types: output storage type per channel (HALF may be
//                        widened to FLOAT on the fly).
// data_ptr / data_len  : raw (possibly compressed) chunk payload.
// width, num_lines     : chunk dimensions; x_stride is the destination row
//                        stride in pixels (== tile_size_x for tiles).
// y / line_no          : destination scanline origins (y is used only by the
//                        NONE path; line_no by the compressed paths).
// line_order           : 0 = increasing Y; otherwise rows are written
//                        bottom-up using (height - 1 - row).
// Returns false on decompression failure or insufficient input data.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();
    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);
    if (!ret) {
      return false;
    }
    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            FP16 hf;
            // hf.u = line_ptr[u];
            // use `cpy` to avoid unaligned memory access when compiler's
            // optimization is on.
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                // Bottom-up line order: mirror the destination row.
                image += static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else { // HALF -> FLOAT
              FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = static_cast<size_t>(
                             (height - 1 - (line_no + static_cast<int>(v)))) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // NOTE(review): this branch offsets into outBuf with x_stride,
          // while every other PIZ/ZIP/RLE branch uses width. DecompressPiz
          // above was given `width`, so the decompressed buffer stride
          // should be width; for edge tiles (x_stride > width) this looks
          // inconsistent — confirm against upstream tinyexr.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(x_stride) +
              channel_offset_list[c] * static_cast<size_t>(x_stride)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += static_cast<size_t>(
                           (height - 1 - (line_no + static_cast<int>(v)))) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
      }
    }
#else
    assert(0 && "PIZ is enabled in this build");
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
             compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
    if (!tinyexr::DecompressZip(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
            static_cast<unsigned long>(data_len))) {
      return false;
    }
    // For ZIP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else { // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              size_t offset = 0;
              if (line_order == 0) {
                offset = (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                offset = (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              image += offset;
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    if (dstLen == 0) {
      return false;
    }
    if (!tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
                                dstLen, data_ptr,
                                static_cast<unsigned long>(data_len))) {
      return false;
    }
    // For RLE_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &outBuf.at(v * static_cast<size_t>(pixel_data_size) *
                             static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            tinyexr::FP16 hf;
            // hf.u = line_ptr[u];
            tinyexr::cpy2(&(hf.u), line_ptr + u);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
            if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
              unsigned short *image =
                  reinterpret_cast<unsigned short **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = hf.u;
            } else { // HALF -> FLOAT
              tinyexr::FP32 f32 = half_to_float(hf);
              float *image = reinterpret_cast<float **>(out_images)[c];
              if (line_order == 0) {
                image += (static_cast<size_t>(line_no) + v) *
                             static_cast<size_t>(x_stride) +
                         u;
              } else {
                image += (static_cast<size_t>(height) - 1U -
                          (static_cast<size_t>(line_no) + v)) *
                             static_cast<size_t>(x_stride) +
                         u;
              }
              *image = f32.f;
            }
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            unsigned int val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(&val);
            unsigned int *image =
                reinterpret_cast<unsigned int **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            // val = line_ptr[u];
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    tinyexr::ZFPCompressionParam zfp_compression_param;
    if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
                                 num_attributes)) {
      assert(0);
      return false;
    }
    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
                                      static_cast<size_t>(num_lines) *
                                      pixel_data_size);
    unsigned long dstLen = outBuf.size();
    assert(dstLen > 0);
    // NOTE(review): return value of DecompressZfp is ignored here;
    // other branches propagate decompression failure.
    tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
                           num_lines, num_channels, data_ptr,
                           static_cast<unsigned long>(data_len),
                           zfp_compression_param);
    // For ZFP_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
    for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
      // ZFP path supports FLOAT channels only.
      assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
      if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
        for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
          for (size_t u = 0; u < static_cast<size_t>(width); u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            float *image = reinterpret_cast<float **>(out_images)[c];
            if (line_order == 0) {
              image += (static_cast<size_t>(line_no) + v) *
                           static_cast<size_t>(x_stride) +
                       u;
            } else {
              image += (static_cast<size_t>(height) - 1U -
                        (static_cast<size_t>(line_no) + v)) *
                           static_cast<size_t>(x_stride) +
                       u;
            }
            *image = val;
          }
        }
      } else {
        assert(0);
        return false;
      }
    }
#else
    (void)attributes;
    (void)num_attributes;
    (void)num_channels;
    assert(0);
    return false;
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // Uncompressed: read directly out of data_ptr (no staging buffer).
    for (size_t c = 0; c < num_channels; c++) {
      for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
        if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
          const unsigned short *line_ptr =
              reinterpret_cast<const unsigned short *>(
                  data_ptr + v * pixel_data_size * size_t(width) +
                  channel_offset_list[c] * static_cast<size_t>(width));
          if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
            unsigned short *outLine =
                reinterpret_cast<unsigned short *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }
            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              outLine[u] = hf.u;
            }
          } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
            float *outLine = reinterpret_cast<float *>(out_images[c]);
            if (line_order == 0) {
              outLine += (size_t(y) + v) * size_t(x_stride);
            } else {
              outLine +=
                  (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
            }
            // Bounds check against the raw chunk payload.
            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }
            for (int u = 0; u < width; u++) {
              tinyexr::FP16 hf;
              // address may not be aliged. use byte-wise copy for safety.#76
              // hf.u = line_ptr[u];
              tinyexr::cpy2(&(hf.u), line_ptr + u);
              tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
              tinyexr::FP32 f32 = half_to_float(hf);
              outLine[u] = f32.f;
            }
          } else {
            assert(0);
            return false;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
          const float *line_ptr = reinterpret_cast<const float *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));
          float *outLine = reinterpret_cast<float *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }
          if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
              (data_ptr + data_len)) {
            // Insufficient data size
            return false;
          }
          for (int u = 0; u < width; u++) {
            float val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
          const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
              data_ptr + v * pixel_data_size * size_t(width) +
              channel_offset_list[c] * static_cast<size_t>(width));
          unsigned int *outLine =
              reinterpret_cast<unsigned int *>(out_images[c]);
          if (line_order == 0) {
            outLine += (size_t(y) + v) * size_t(x_stride);
          } else {
            outLine +=
                (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
          }
          for (int u = 0; u < width; u++) {
            // NOTE(review): per-element bounds check here, unlike the
            // per-line check in the HALF/FLOAT branches above.
            if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
                (data_ptr + data_len)) {
              // Corrupsed data?
              return false;
            }
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        }
      }
    }
  }
  return true;
}
// Decode pixel data for a single tile. Writes the tile's effective
// width/height to *width / *height (tiles on the right/bottom image edge
// may be smaller than the nominal tile size), then delegates to
// DecodePixelData with the nominal tile size as the row stride.
static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);
  // Clamp the nominal tile extent against the image boundary.
  const int remain_x = data_width - (tile_offset_x * tile_size_x);
  const int remain_y = data_height - (tile_offset_y * tile_size_y);
  (*width) = (remain_x < tile_size_x) ? remain_x : tile_size_x;
  (*height) = (remain_y < tile_size_y) ? remain_y : tile_size_y;
  // Image size = tile size.
  DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                  compression_type, line_order, (*width), tile_size_y,
                  /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                  (*height), pixel_data_size, num_attributes, attributes,
                  num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
// Allocate one destination buffer per channel, each holding
// data_width * data_height elements of the storage type requested for that
// channel (HALF channels may be stored as float when widening is requested).
// Ownership of the returned pointer array and its elements transfers to the
// caller (free each images[c], then images).
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  // Pixel count is loop-invariant; compute it once.
  size_t data_len =
      static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF source widened to FLOAT storage.
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
        // BUGFIX: in release builds (assert compiled out) this slot was
        // left uninitialized; make the failure deterministic.
        images[c] = NULL;
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
      // BUGFIX: same uninitialized-slot hazard as above.
      images[c] = NULL;
    }
  }
  return images;
}
// Parse one EXR header (the attribute list up to the terminating '\0')
// from `buf` into `info`.
//
// empty_header : (optional out) set true for the empty header that
//                terminates a multipart header list.
// version      : parsed EXR version flags (multipart / tiled).
// err          : (optional out) human-readable error messages appended.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_* on malformed/unsupported
// input. On success info->header_len holds the number of bytes consumed.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);
  if (empty_header) {
    (*empty_header) = false;
  }
  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }
  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  // Reset info to defaults; sentinel values (-1) mark "not seen".
  info->data_window[0] = 0;
  info->data_window[1] = 0;
  info->data_window[2] = 0;
  info->data_window[3] = 0;
  info->line_order = 0; // @fixme
  info->display_window[0] = 0;
  info->display_window[1] = 0;
  info->display_window[2] = 0;
  info->display_window[3] = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;
  info->attributes.clear();
  // Read attributes
  size_t orig_size = size;
  // Bounded loop guards against malformed headers with no terminator.
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // '\0' terminates the attribute list.
      size--;
      break;
    }
    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;
    if (version->tiled && attr_name.compare("tiles") == 0) {
      // tiledesc: two uint32 sizes (little-endian) + one mode byte.
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      assert(data.size() == 9);
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);
      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);
      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
    } else if (attr_name.compare("compression") == 0) {
      // NOTE(review): data[0] is read without checking data is non-empty;
      // ReadAttribute presumably guarantees a payload — confirm.
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }
      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }
      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }
      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      has_channels = true;
    } else if (attr_name.compare("dataWindow") == 0) {
      // box2i: xMin, yMin, xMax, yMax (little-endian int32).
      if (data.size() >= 16) {
        memcpy(&info->data_window[0], &data.at(0), sizeof(int));
        memcpy(&info->data_window[1], &data.at(4), sizeof(int));
        memcpy(&info->data_window[2], &data.at(8), sizeof(int));
        memcpy(&info->data_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window[0], &data.at(0), sizeof(int));
        memcpy(&info->display_window[1], &data.at(4), sizeof(int));
        memcpy(&info->display_window[2], &data.at(8), sizeof(int));
        memcpy(&info->display_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[1]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[2]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[3]));
        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_width));
        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        // NOTE(review): malloc(0)/data.at(0) would misbehave for an empty
        // payload — presumably ReadAttribute never yields one; verify.
        attrib.size = static_cast<int>(data.size());
        attrib.value = static_cast<unsigned char *>(malloc(data.size()));
        memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
               data.size());
        info->attributes.push_back(attrib);
      }
    }
  }
  // Check if required attributes exist
  {
    std::stringstream ss_err;
    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }
    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }
    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }
    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }
    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }
    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }
    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }
    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }
    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }
  // Bytes consumed by this header.
  info->header_len = static_cast<unsigned int>(orig_size - size);
  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
//
// Copies all scalar header fields, then deep-copies channel info and
// pixel-type arrays (malloc'ed; caller releases via FreeEXRHeader()).
// Custom attribute `value` pointers are shallow-copied: ownership of the
// attribute payloads transfers from `info` to `exr_header`.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window[0] = info.display_window[0];
  exr_header->display_window[1] = info.display_window[1];
  exr_header->display_window[2] = info.display_window[2];
  exr_header->display_window[3] = info.display_window[3];
  exr_header->data_window[0] = info.data_window[0];
  exr_header->data_window[1] = info.data_window[1];
  exr_header->data_window[2] = info.data_window[2];
  exr_header->data_window[3] = info.data_window[3];
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;

  exr_header->num_channels = static_cast<int>(info.channels.size());

  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    // Iterate over the clamped count, not info.attributes.size(): the
    // allocation above is sized by the clamped value, so using the raw
    // container size could write past the end of `custom_attributes`.
    for (size_t i = 0; i < size_t(exr_header->num_custom_attributes); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer (ownership moves to exr_header).
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }

  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;
}
// Decodes every pixel chunk listed in `offsets` (byte offsets from `head`
// into the EXR payload of length `size`) into `exr_image`, handling both
// tiled and scanline layouts. Returns TINYEXR_SUCCESS or an error code;
// on failure an English message is appended to `*err` when non-NULL.
//
// NOTE(review): on the tiled path, early error returns leave previously
// allocated tiles in `exr_image` for the caller to release (FreeEXRImage).
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  // Scanlines per compressed block: 16 for ZIP/ZFP, 32 for PIZ, 1 otherwise.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // data_window bounds are inclusive, hence the +1.
  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      std::stringstream ss;
      ss << "Invalid data width or data height: " << data_width << ", "
         << data_height << std::endl;
      (*err) += ss.str();
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if ((data_width > threshold) || (data_height > threshold)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  size_t num_blocks = offsets.size();

  // Per-channel byte offsets within one decoded pixel, plus the total
  // bytes-per-pixel, derived from the channel list.
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels, exr_header->requested_pixel_types,
          exr_header->tile_size_x, exr_header->tile_size_y);

      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      if (offsets[tile_idx] + sizeof(int) * 5 > size) {
        if (err) {
          (*err) += "Insufficient data size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // Bytes remaining after this tile's 20-byte header.
      size_t data_size = size - size_t(offsets[tile_idx] + sizeof(int) * 5);
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);

      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

      // @todo{ LoD }
      // Only level (0, 0) is supported; mip/rip levels are rejected.
      if (tile_coordinates[2] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }
      if (tile_coordinates[3] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }

      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

      if (data_len < 4 || size_t(data_len) > data_size) {
        if (err) {
          (*err) += "Insufficient data length.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;

      tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height, tile_coordinates[0],
          tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
          static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);

      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

      exr_image->num_tiles = static_cast<int>(num_tiles);
    }
  } else {  // scanline format

    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown = sizeof(void*) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown ) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

    // Each scanline block is independent, so they may decode in parallel;
    // `invalid_data` is only ever set to true, so the race is benign-ish
    // (see TODO above about using an atomic).
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
      size_t y_idx = static_cast<size_t>(y);

      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size - size_t(offsets[y_idx] + sizeof(int) * 2);
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
          // Too large value. Assume this is invalid
          // 2**20 = 1048576 = heuristic value.
          invalid_data = true;
        } else if (data_len == 0) {
          // TODO(syoyo): May be ok to raise the threshold for example `data_len
          // < 4`
          invalid_data = true;
        } else {
          // line_no may be negative.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window[3] + 1));

          int num_lines = end_line_no - line_no;

          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y

            // overflow check: do the subtraction in 64 bits first and
            // reject results that don't fit into an int.
            tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]);
            if (lno > std::numeric_limits<int>::max()) {
              line_no = -1; // invalid
            } else if (lno < -std::numeric_limits<int>::max()) {
              line_no = -1; // invalid
            } else {
              line_no -= exr_header->data_window[1];
            }

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }
    }  // omp parallel
  }

  if (invalid_data) {
    if (err) {
      std::stringstream ss;  // NOTE(review): unused local, kept as-is.
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}
// Rebuilds a (possibly zero-filled/corrupt) scanline offset table by walking
// the chunk headers sequentially from `marker`: each chunk begins with a
// 4-byte scanline number followed by a 4-byte payload length. Mirrors
// OpenEXR's readLineOffsets() recovery path. Returns false when a chunk
// header would fall outside the `size`-byte buffer starting at `head`.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // Swap to host byte order BEFORE range-checking `data_len`; validating
    // the raw little-endian value would accept bogus sizes (and advance the
    // cursor by a garbage amount) on big-endian hosts. No-op on LE builds.
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    if (data_len >= size) {
      return false;
    }

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}
// Top-level image decoder: validates the data window, computes the number of
// chunks (from the chunkCount attribute, tile grid, or scanline-block math),
// reads the offset table starting at `marker`, reconstructs it if incomplete,
// and hands off to DecodeChunk(). On DecodeChunk failure, any per-channel
// scanline images already allocated are freed before returning.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanlines per compressed block (must match DecodeChunk's table).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // Compute width/height from the inclusive data window, guarding the +1
  // against int overflow (Issue 63).
  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_width++;

  int data_height = exr_header->data_window[3] - exr_header->data_window[1];
  if (data_height >= std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_height++;

  if ((data_width < 0) || (data_height < 0)) {
    tinyexr::SetErrorMessage("data width or data height is negative.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if (data_width > threshold) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > threshold) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  size_t num_blocks = 0;

  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD }
    // Tile counts round up so partial edge tiles are included.
    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    // Scanline blocks, rounding up for a partial last block.
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);

    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  // The offset table is num_blocks little-endian uint64 values.
  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    // Issue #81
    if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
      tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //  stringstream ss;
      //  ss << "Incomplete lineOffsets." << std::endl;
      //  (*err) += ss.str();
      //}
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK
        break;
      } else {
        tinyexr::SetErrorMessage(
            "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
    }

    return ret;
  }
}
} // namespace tinyexr
// Loads an EXR file and converts it to an interleaved RGBA float buffer
// (4 floats per pixel, malloc'ed into *out_rgba; caller frees). HALF
// channels are promoted to FLOAT. A single-channel image is replicated
// into all four components; otherwise R/G/B channels are required and a
// missing A channel is filled with 1.0. Multipart and deep images are
// rejected. Returns TINYEXR_SUCCESS or an error code (message in *err).
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Locate RGBA channels by exact name match.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  if (exr_header.num_channels == 1) {
    // Grayscale channel only: replicate into RGBA.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check (partial edge tiles).
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // Assume RGB(A)
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // Release the decoded image as well as the header (was leaked; see
      // the former `@todo { free exr_image }`).
      FreeEXRImage(&exr_image);
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRImage(&exr_image);
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRImage(&exr_image);
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check (partial edge tiles).
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_HEADER;
}
return TINYEXR_SUCCESS;
}
// Parses the EXR header that follows the version block in `memory` and fills
// `exr_header` via ConvertHeader(). Note that ConvertHeader() is invoked even
// when parsing fails, so `exr_header` always ends up in a state that
// FreeEXRHeader() can safely release.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Skip the 8-byte magic + version block; the header starts right after it.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}
// Memory-buffer counterpart of LoadEXR(): decodes an EXR image from `memory`
// into an interleaved RGBA float buffer (4 floats per pixel, malloc'ed into
// *out_rgba; caller frees). HALF channels are promoted to FLOAT; a single
// channel is replicated into RGBA; otherwise R/G/B are required and missing
// A is filled with 1.0. Returns TINYEXR_SUCCESS or an error code.
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    tinyexr::SetErrorMessage("Failed to parse EXR version", err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // ParseEXRHeaderFromMemory always runs ConvertHeader(), so the header
    // holds allocations even on failure; release them (was a leak).
    FreeEXRHeader(&exr_header);
    return ret;
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);  // was leaked on this path
    return ret;
  }

  // Locate RGBA channels by exact name match.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only: replicate into RGBA.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check (partial edge tiles).
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  } else {
    // TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // Release decoded image and header before bailing out (was leaked;
      // see the former `@todo { free exr_image }`).
      FreeEXRImage(&exr_image);
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRImage(&exr_image);
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRImage(&exr_image);
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check (partial edge tiles).
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    } else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}
// Reads `filename` fully into memory and decodes it via
// LoadEXRImageFromMemory(). `exr_header` must already be parsed/initialized.
// Returns TINYEXR_SUCCESS or an error code (message in *err).
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize < 16) {
    fclose(fp);  // was leaked on this early-return path
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes the pixel data of an EXR held in `memory` (whose header was
// already parsed into `exr_header`) into `exr_image`.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  const bool bad_args = (exr_image == NULL) || (memory == NULL) ||
                        (size < tinyexr::kEXRVersionSize);
  if (bad_args) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  // Pixel data begins after the 8-byte magic+version block plus the header.
  const unsigned char *marker = memory + 8 + exr_header->header_len;

  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}
// Serializes `exr_image` (scanline layout) into a newly malloc()-ed EXR
// memory image stored to *memory_out. Returns the number of bytes written,
// or 0 on error (with a message in `err`). The caller owns and must free()
// *memory_out. Supported compression: NONE, RLE, ZIPS, ZIP, and — when the
// build enables them — PIZ and ZFP.
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
                            const EXRHeader *exr_header,
                            unsigned char **memory_out, const char **err) {
  // NOTE(review): `exr_header` is dereferenced here without a NULL check —
  // callers must pass a valid header.
  if (exr_image == NULL || memory_out == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
    return 0;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return 0;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return 0;
  }
#endif

#if TINYEXR_USE_ZFP
  // ZFP path only handles FLOAT pixel data.
  for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
    if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
      tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                               err);
      return 0;
    }
  }
#endif

  std::vector<unsigned char> memory;

  // Header
  {
    // EXR magic number (0x76 0x2f 0x31 0x01).
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    memory.insert(memory.end(), header, header + 4);
  }

  // Version, scanline.
  {
    // Version 2, all feature flag bits clear (plain scanline image).
    char marker[] = {2, 0, 0, 0};
    /* @todo
    if (exr_header->tiled) {
      marker[1] |= 0x2;
    }
    if (exr_header->long_name) {
      marker[1] |= 0x4;
    }
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    if (exr_header->multipart) {
      marker[1] |= 0x10;
    }
    */
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Scanlines per chunk depends on the compression scheme.
  int num_scanlines = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }

  // Write attributes.
  std::vector<tinyexr::ChannelInfo> channels;
  {
    std::vector<unsigned char> data;

    // Channels are written with the pixel type *requested* for output,
    // which may differ from the in-memory type (converted per pixel below).
    for (int c = 0; c < exr_header->num_channels; c++) {
      tinyexr::ChannelInfo info;
      info.p_linear = 0;
      info.pixel_type = exr_header->requested_pixel_types[c];
      info.x_sampling = 1;
      info.y_sampling = 1;
      info.name = std::string(exr_header->channels[c].name);
      channels.push_back(info);
    }

    tinyexr::WriteChannelInfo(data, channels);

    tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                    static_cast<int>(data.size()));
  }

  {
    int comp = exr_header->compression_type;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
    tinyexr::WriteAttributeToMemory(
        &memory, "compression", "compression",
        reinterpret_cast<const unsigned char *>(&comp), 1);
  }

  {
    // dataWindow == displayWindow == full image extent.
    int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
    tinyexr::WriteAttributeToMemory(
        &memory, "dataWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
    tinyexr::WriteAttributeToMemory(
        &memory, "displayWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
  }

  {
    unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
    tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                                    &line_order, 1);
  }

  {
    float aspectRatio = 1.0f;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
    tinyexr::WriteAttributeToMemory(
        &memory, "pixelAspectRatio", "float",
        reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1]));
    tinyexr::WriteAttributeToMemory(
        &memory, "screenWindowCenter", "v2f",
        reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
  }

  {
    float w = static_cast<float>(exr_image->width);
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
    tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                                    reinterpret_cast<const unsigned char *>(&w),
                                    sizeof(float));
  }

  // Custom attributes
  if (exr_header->num_custom_attributes > 0) {
    for (int i = 0; i < exr_header->num_custom_attributes; i++) {
      tinyexr::WriteAttributeToMemory(
          &memory, exr_header->custom_attributes[i].name,
          exr_header->custom_attributes[i].type,
          reinterpret_cast<const unsigned char *>(
              exr_header->custom_attributes[i].value),
          exr_header->custom_attributes[i].size);
    }
  }

  {  // end of header
    unsigned char e = 0;
    memory.push_back(e);
  }

  // Number of chunks; the last chunk may cover fewer than num_scanlines rows.
  int num_blocks = exr_image->height / num_scanlines;
  if (num_blocks * num_scanlines < exr_image->height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));

  size_t headerSize = memory.size();
  tinyexr::tinyexr_uint64 offset =
      headerSize +
      static_cast<size_t>(num_blocks) *
          sizeof(
              tinyexr::tinyexr_int64);  // sizeof(header) + sizeof(offsetTable)

  std::vector<std::vector<unsigned char> > data_list(
      static_cast<size_t>(num_blocks));
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  // Per-pixel byte size and per-channel byte offsets, in *requested* types.
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    channel_offset_list[c] = channel_offset;
    if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
      pixel_data_size += sizeof(unsigned short);
      channel_offset += sizeof(unsigned short);
    } else if (exr_header->requested_pixel_types[c] ==
               TINYEXR_PIXELTYPE_FLOAT) {
      pixel_data_size += sizeof(float);
      channel_offset += sizeof(float);
    } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
      pixel_data_size += sizeof(unsigned int);
      channel_offset += sizeof(unsigned int);
    } else {
      assert(0);
    }
  }

#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
  }
#endif

// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#ifdef _OPENMP
#pragma omp parallel for
#endif
  // Each block is built independently into data_list[ii]; no shared mutable
  // state is touched inside the loop, so the parallel-for is safe.
  for (int i = 0; i < num_blocks; i++) {
    size_t ii = static_cast<size_t>(i);
    int start_y = num_scanlines * i;
    int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
    int h = endY - start_y;

    // `buf` holds this block's pixels in EXR scanline layout:
    // per row, channels are stored sequentially (channel-planar per line),
    // already byte-swapped to little-endian.
    std::vector<unsigned char> buf(
        static_cast<size_t>(exr_image->width * h * pixel_data_size));

    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
      if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
          // half (in memory) -> float (in file)
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            float *line_ptr = reinterpret_cast<float *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP16 h16;
              h16.u = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP32 f32 = half_to_float(h16);

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));

              // line_ptr[x] = f32.f;
              tinyexr::cpy4(line_ptr + x, &(f32.f));
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_HALF) {
          // half -> half (byte swap only)
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                &buf.at(static_cast<size_t>(pixel_data_size * y *
                                            exr_image->width) +
                        channel_offset_list[c] *
                            static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              unsigned short val = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap2(&val);

              // line_ptr[x] = val;
              tinyexr::cpy2(line_ptr + x, &val);
            }
          }
        } else {
          assert(0);
        }

      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          // float -> half
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                &buf.at(static_cast<size_t>(pixel_data_size * y *
                                            exr_image->width) +
                        channel_offset_list[c] *
                            static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP32 f32;
              f32.f = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP16 h16;
              h16 = float_to_half_full(f32);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

              // line_ptr[x] = h16.u;
              tinyexr::cpy2(line_ptr + x, &(h16.u));
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_FLOAT) {
          // float -> float (byte swap only)
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            float *line_ptr = reinterpret_cast<float *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              float val = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

              // line_ptr[x] = val;
              tinyexr::cpy4(line_ptr + x, &val);
            }
          }
        } else {
          assert(0);
        }
      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        // uint -> uint (byte swap only); no conversion branch for UINT input.
        for (int y = 0; y < h; y++) {
          // Assume increasing Y
          unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * exr_image->width) +
              channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
          for (int x = 0; x < exr_image->width; x++) {
            unsigned int val = reinterpret_cast<unsigned int **>(
                exr_image->images)[c][(y + start_y) * exr_image->width + x];

            tinyexr::swap4(&val);

            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      }
    }

    if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(uncompressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(buf.size());
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), buf.begin(),
                           buf.begin() + data_len);

    } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
               (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
      std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
          static_cast<unsigned long>(buf.size())));
#else
      std::vector<unsigned char> block(
          compressBound(static_cast<uLong>(buf.size())));
#endif
      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressZip(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
      // (buf.size() * 3) / 2 would be enough.
      std::vector<unsigned char> block((buf.size() * 3) / 2);

      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressRle(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
      unsigned int bufLen =
          8192 + static_cast<unsigned int>(
                     2 * static_cast<unsigned int>(
                             buf.size()));  // @fixme { compute good bound. }
      std::vector<unsigned char> block(bufLen);
      unsigned int outSize = static_cast<unsigned int>(block.size());

      CompressPiz(&block.at(0), &outSize,
                  reinterpret_cast<const unsigned char *>(&buf.at(0)),
                  buf.size(), channels, exr_image->width, h);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

#else
      assert(0);  // unreachable: rejected at the top of the function
#endif
    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
      std::vector<unsigned char> block;
      unsigned int outSize;

      tinyexr::CompressZfp(
          &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
          exr_image->width, h, exr_header->num_channels, zfp_compression_param);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

#else
      assert(0);  // unreachable: rejected at the top of the function
#endif
    } else {
      assert(0);
    }
  }  // omp parallel

  // Build the (byte-swapped) chunk offset table; `offset` accumulates the
  // absolute position of each chunk in the final file image.
  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    offsets[i] = offset;
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
    offset += data_list[i].size();
  }

  size_t totalSize = static_cast<size_t>(offset);
  {
    memory.insert(
        memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
        reinterpret_cast<unsigned char *>(&offsets.at(0)) +
            sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
  }

  if (memory.size() == 0) {
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }

  // NOTE(review): malloc() result is not checked; a failed allocation would
  // crash in the memcpy below.
  (*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
  memcpy((*memory_out), &memory.at(0), memory.size());
  unsigned char *memory_ptr = *memory_out + memory.size();

  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
    memory_ptr += data_list[i].size();
  }

  return totalSize;  // OK
}
// Saves an EXR image to `filename`. Returns TINYEXR_SUCCESS on success,
// a negative TINYEXR_ERROR_* code otherwise (message stored to `err`).
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  if (exr_image == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "wb");
#else
  FILE *fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // BUGFIX: close the stream before returning; this early-exit previously
    // leaked `fp` (and left an empty file handle open).
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}
// Loads a deep (per-pixel variable sample count) scanline EXR file into
// `deep_image`. Only version-2 deep scanline files with NONE/RLE/ZIPS/ZIP
// (and PIZ when enabled) compression are accepted. Returns TINYEXR_SUCCESS
// or a negative TINYEXR_ERROR_* code.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _MSC_VER
  FILE *fp = NULL;
  errno_t errcode = fopen_s(&fp, filename, "rb");
  if ((0 != errcode) || (!fp)) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 8, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }

    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;

  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // Empty attribute name terminates the header.
      marker++;
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      num_channels = static_cast<int>(channels.size());

      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

    } else if (attr_name.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));

    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed but unused.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
    }
  }

  // NOTE(review): assert-only validation; in release builds a file missing
  // dataWindow/channels would proceed with negative extents.
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);

  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;

  std::vector<float> image(
      static_cast<size_t>(data_width * data_height * 4));  // 4 = RGBA

  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }

  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }

#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  // NOTE(review): malloc() results below are not checked.
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // NOTE(review): rows are allocated lazily in the block loop below;
    // this inner loop is intentionally empty in the original code.
    for (int y = 0; y < data_height; y++) {
    }
  }

  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // int    : y coordinate
    // int64  : packed size of pixel offset table
    // int64  : packed size of sample data
    // int64  : unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));

    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));

    // decode pixel offset table.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        // BUGFIX: was `return false;`, which equals 0 == TINYEXR_SUCCESS and
        // reported the decompression failure as success.
        return TINYEXR_ERROR_INVALID_DATA;
      }

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));

    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          // BUGFIX: same as above — `return false;` meant TINYEXR_SUCCESS.
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);

    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));

        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}
// Resets an EXRImage to an empty, well-defined state (no channels, no pixel
// data, no tiles). Passing NULL is a harmless no-op.
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return;
  }

  exr_image->width = exr_image->height = 0;
  exr_image->num_channels = 0;
  exr_image->num_tiles = 0;

  exr_image->images = NULL;
  exr_image->tiles = NULL;
}
// Releases an error message string previously allocated by this library.
// Safe to call with NULL.
void FreeEXRErrorMessage(const char *msg) {
  if (msg == NULL) {
    return;
  }
  // The string was malloc()-ed by the library; cast away const to free it.
  free(const_cast<char *>(msg));
}
// Zero-initializes every field of an EXRHeader. Passing NULL is a no-op.
void InitEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return;
  }
  memset(exr_header, 0, sizeof(*exr_header));
}
// Frees all heap memory owned by an EXRHeader (channel list, pixel type
// arrays, custom attribute values and table). Does not free the EXRHeader
// struct itself. Returns TINYEXR_SUCCESS, or
// TINYEXR_ERROR_INVALID_ARGUMENT for a NULL header.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // free(NULL) is a no-op, so the per-pointer NULL guards are unnecessary.
  free(exr_header->channels);
  free(exr_header->pixel_types);
  free(exr_header->requested_pixel_types);

  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
  }
  free(exr_header->custom_attributes);

  return TINYEXR_SUCCESS;
}
// Frees all pixel/tile data owned by an EXRImage (per-channel scanline
// images and per-tile images). Does not free the EXRImage struct itself.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_ARGUMENT for NULL.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanline representation: images[channel] -> pixel buffer.
  if (exr_image->images) {
    for (int c = 0; c < exr_image->num_channels; c++) {
      free(exr_image->images[c]);  // free(NULL) is a no-op
    }
    free(exr_image->images);
  }

  // Tiled representation: tiles[t].images[channel] -> pixel buffer.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      unsigned char **tile_images = exr_image->tiles[tid].images;
      if (tile_images) {
        for (int c = 0; c < exr_image->num_channels; c++) {
          free(tile_images[c]);
        }
        free(tile_images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}
// Parses the EXR header section of `filename` into `exr_header`, using a
// previously parsed EXRVersion. Returns TINYEXR_SUCCESS or a negative
// TINYEXR_ERROR_* code (message stored to `err`).
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    // BUGFIX: an empty file previously reached fread(&buf[0], ...) on an
    // empty vector and buf.at(0) below, which throws std::out_of_range.
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parses all part headers of a multipart EXR memory image. On success,
// *exr_headers points to a malloc()-ed array of *num_headers malloc()-ed
// EXRHeader pointers (caller frees each with FreeEXRHeader + free, then the
// array). Returns TINYEXR_SUCCESS or a negative TINYEXR_ERROR_* code.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  // Headers are stored back-to-back; an empty header ('\0') terminates the
  // list.
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // move to next header.
    marker += info.header_len;
    // BUGFIX: shrink the remaining-bytes counter that is actually passed to
    // ParseEXRHeader. Previously the unused `size` variable was decremented
    // while `marker_size` stayed constant, so subsequent iterations could
    // read past the end of the buffer.
    marker_size -= info.header_len;
  }

  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));

    ConvertHeader(exr_header, infos[i]);

    // transfer `tiled` from version.
    exr_header->tiled = exr_version->tiled;

    (*exr_headers)[i] = exr_header;
  }

  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}
// Reads a file into memory and delegates to ParseEXRMultipartHeaderFromMemory.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size.
  // BUG FIX: the previous revision cast an unchecked ftell() result straight
  // to size_t and, for an empty file, dereferenced buf[0] of an empty vector
  // (undefined behavior).  Validate the size before allocating/reading, and
  // always close the file on the error paths.
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  size_t ret = fread(&buf[0], 1, filesize, fp);
  fclose(fp);
  if (ret != filesize) {
    tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parses the 8-byte EXR prologue (4-byte magic number + version byte + flag
// bytes) from memory into `version`.  Returns TINYEXR_SUCCESS, or an error
// code for a short buffer, wrong magic, or unsupported version.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Magic number check: 0x76 0x2f 0x31 0x01.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
  {
    // Format version byte: must be 2.
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }
    // NOTE: a redundant `version == NULL` re-check used to sit here; it was
    // dead code (NULL is already rejected at function entry) and has been
    // removed.

    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit: single-part tiled
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit: long attribute/channel names
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit: non-image (deep) data
      version->non_image = true;
    }
    if (marker[1] & 0x10) {  // 12th bit: multipart file
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}
// Reads the first kEXRVersionSize bytes of a file and parses them with
// ParseEXRVersionFromMemory.  Returns TINYEXR_SUCCESS or an error code.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size.  BUG FIX: the previous revision leaked `fp` on the
  // short-file early return and never checked ftell() for failure.
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes every part of a multipart EXR image from memory.  `exr_headers`
// must come from ParseEXRMultipartHeaderFromMemory (header_len populated);
// `exr_images` must hold `num_parts` initialized EXRImage structs.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  // BUG FIX: validate the header span before forming pointers from it, so a
  // corrupt header_len cannot push `marker` past the end of `memory`.
  // +8 for magic number and version header, +1 for the empty-header byte.
  if (total_header_size + 8 + 1 > size) {
    tinyexr::SetErrorMessage("Invalid header size.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.
  marker += 1;  // Skip empty header.
  const char *marker_end = reinterpret_cast<const char *>(memory) + size;

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+ : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      // BUG FIX: bounds-check each 8-byte table entry before reading it.
      if (marker + 8 > marker_end) {
        tinyexr::SetErrorMessage("Chunk offset table is truncated.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check 'part number' is identical to 'i'
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
      unsigned int part_no;
      memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
      tinyexr::swap4(&part_no);

      if (part_no != i) {
        tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
                                   memory, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}
// Reads a file into memory and delegates to LoadEXRMultipartImageFromMemory.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUG FIX: `filename` was not NULL-checked before being handed to fopen
  // and std::string (both UB on NULL); sibling functions already check it.
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  // Compute size; reject empty/unreadable files before touching buf[0].
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize <= 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("Invalid file size " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  // BUG FIX: the previous revision discarded the fread result with
  // `(void)ret` (the assert is compiled out under NDEBUG); handle short
  // reads like the sibling loaders do.
  size_t ret = fread(&buf[0], 1, filesize, fp);
  fclose(fp);
  if (ret != filesize) {
    tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Saves an interleaved float image (1, 3, or 4 components) to an EXR file.
// Channels are written in (A)BGR order, which most EXR viewers expect.
// `save_as_fp16` > 0 stores half floats; otherwise full 32-bit floats.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Robustness fix: reject NULL data and empty images up front.
  if ((data == NULL) || (width < 1) || (height < 1)) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  // BUG FIX: compute the pixel count in size_t; `width * height` was an int
  // multiplication that overflows for large images.
  const size_t pixel_count =
      static_cast<size_t>(width) * static_cast<size_t>(height);

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(pixel_count);
    memcpy(images[0].data(), data, sizeof(float) * pixel_count);
  } else {
    images[0].resize(pixel_count);
    images[1].resize(pixel_count);
    images[2].resize(pixel_count);
    images[3].resize(pixel_count);

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers.
    for (size_t i = 0; i < pixel_count; i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Channel planes in (A)BGR order.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  // NOTE(review): malloc results are not checked (pre-existing behavior).
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));

  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  // Every channel name is a single character, so a zero-fill plus one byte
  // reproduces the old strncpy(..., 255) + explicit NUL exactly.
  {
    static const char *abgr_names[4] = {"A", "B", "G", "R"};
    static const char *bgr_names[3] = {"B", "G", "R"};
    const char **names = (components == 3) ? bgr_names : abgr_names;
    for (int c = 0; c < header.num_channels; c++) {
      memset(header.channels[c].name, 0, sizeof(header.channels[c].name));
      header.channels[c].name[0] = names[c][0];
    }
  }

  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image

    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);

  // BUG FIX: the previous revision returned early on failure without
  // releasing the header allocations; free them on every path.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}
#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
SpatialMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialMaxUnpooling.c"
#else
// Scatters one frame (all slices of one batch element) of pooled values into
// the already zero-filled full-size output: for each input element, the value
// is written at the flat position recorded by the max-pooling index tensor.
// Parallelized over feature-map slices.
static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(scalar_t *input_p, scalar_t *output_p,
                                                          THIndex_t *ind_p,
                                                          int nslices,
                                                          int iwidth, int iheight,
                                                          int owidth, int oheight)
{
  int k;
  int has_error = 0;
  THIndex_t error_index = 0;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    // per-slice base pointers into input, output, and index storage
    scalar_t *output_p_k = output_p + k*owidth*oheight;
    scalar_t *input_p_k = input_p + k*iwidth*iheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j]; /* retrieve position of max */
        if(maxp<0 || maxp>=owidth*oheight){
          // Invalid index: record it and keep going rather than raising an
          // error inside the OpenMP region (last writer wins; any invalid
          // index aborts below, so which one is reported does not matter).
#pragma omp critical
          {
            has_error = 1;
            error_index = maxp;
          }
        } else {
          output_p_k[maxp] = input_p_k[i*iwidth + j]; /* update output */
        }
      }
    }
  }
  // Report (one of) the offending indices once the parallel loop is done.
  if (has_error) {
    THError("found an invalid max index %ld (output volumes are of size %dx%d)",
        error_index, oheight, owidth);
  }
}
// Forward pass of spatial max-unpooling.  Resizes `output` to
// (nbatch x) nslices x oheight x owidth, zero-fills it, and scatters `input`
// values to the positions stored in `indices` (the argmax positions produced
// by a matching max-pooling).  Accepts 3D (C,H,W) or 4D batch-mode (N,C,H,W)
// input.
void THNN_(SpatialMaxUnpooling_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  // Dimension positions for the 3D case; both are bumped by one in batch mode.
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *input_data;
  scalar_t *output_data;
  THIndex_t *indices_data;

  AT_CHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4),
           "non-empty 3D or 4D (batch mode) tensor expected for input, but got sizes: ", input->sizes());
  THNN_CHECK_SHAPE_INDICES(input, indices);

  if (input->dim() == 4)
  {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* get contiguous input and indices */
  input = THTensor_(newContiguous)(input);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize output */
  if (input->dim() == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data,
                                                  indices_data,
                                                  nslices,
                                                  iwidth, iheight,
                                                  owidth, oheight);
  }
  else
  {
    int p;

    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    // Process each batch element as an independent frame.
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateOutput_frame)(
          input_data+p*nslices*iwidth*iheight,
          output_data+p*nslices*owidth*oheight,
          indices_data+p*nslices*iwidth*iheight,
          nslices,
          iwidth, iheight,
          owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copies created above */
  c10::raw::intrusive_ptr::decref(input);
  THIndexTensor_(free)(indices);
}
// Backward-pass helper: gathers gradients from gradOutput at the recorded max
// positions back into gradInput (the inverse of the forward scatter).
// Parallelized over slices.
// NOTE(review): unlike the forward frame, THError is raised directly inside
// the OpenMP parallel region on an invalid index; propagating an error out of
// a parallel region is not well-defined — confirm against upstream intent.
static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(scalar_t *gradInput_p, scalar_t *gradOutput_p,
                                                             THIndex_t *ind_p,
                                                             int nslices,
                                                             int iwidth, int iheight,
                                                             int owidth, int oheight)
{
  int k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    // per-slice base pointers
    scalar_t *gradInput_p_k = gradInput_p + k*iwidth*iheight;
    scalar_t *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j]; /* retrieve position of max */
        if(maxp < 0 || maxp >= owidth * oheight) {
            THError("invalid max index %ld, owidth= %d, oheight= %d", maxp, owidth, oheight);
        }
        gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */
      }
    }
  }
}
// Backward pass of spatial max-unpooling: resizes gradInput to match input,
// zero-fills it, checks that gradOutput has the expected (oheight, owidth)
// spatial size, and gathers gradients back through the stored indices.
// Supports 3D (C,H,W) and 4D batch-mode (N,C,H,W) tensors.
void THNN_(SpatialMaxUnpooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  // Dimension positions for the 3D case; bumped by one in batch mode.
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;
  THIndex_t *indices_data;

  THNN_CHECK_SHAPE_INDICES(input, indices);

  /* get contiguous gradOutput and indices */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  // Shape check: gradOutput's spatial extent must match the unpooled size.
  // NOTE(review): %d is used for tensor sizes that may be 64-bit — confirm
  // the THError format specifiers against the size type.
  if(owidth!=gradOutput->size(dimw) || oheight!=gradOutput->size(dimh)){
    THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d",
            oheight, owidth, gradOutput->size(dimh), gradOutput->size(dimw));
  }

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 3)
  {
    THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                     indices_data,
                                                     nslices,
                                                     iwidth, iheight,
                                                     owidth, oheight);
  }
  else
  {
    int p;
    // Process each batch element independently.
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight,
                                                       indices_data+p*nslices*iwidth*iheight,
                                                       nslices,
                                                       iwidth, iheight,
                                                       owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copies created above */
  c10::raw::intrusive_ptr::decref(gradOutput);
  THIndexTensor_(free)(indices);
}
#endif
|
main.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <PeriodicFMM/FMMWrapper-c.h>
#include <PeriodicFMM/FMMWrapperWall2D-c.h>
/*
 * Smoke test for the PeriodicFMM C wrappers: builds deterministic
 * pseudo-random source/target point sets, then runs Evaluate / DataClear /
 * Evaluate for both the periodic FMM and the Wall2D variant to verify that a
 * cleared tree can be reused.
 */
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int rank = 0, size = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    (void)size; /* queried for symmetry; not otherwise used */

    const int nsrc = 16384;
    const int ntrg = 16384;
    double *srcCoord = malloc(sizeof(double) * 3 * nsrc);
    double *srcValue = malloc(sizeof(double) * 3 * nsrc);
    double *trgCoord = malloc(sizeof(double) * 3 * ntrg);
    double *trgValue = malloc(sizeof(double) * 3 * ntrg);
    /* BUG FIX: the buffers were previously used without checking that the
     * allocations succeeded. */
    if (!srcCoord || !srcValue || !trgCoord || !trgValue) {
        fprintf(stderr, "rank %d: out of memory\n", rank);
        free(srcCoord);
        free(srcValue);
        free(trgCoord);
        free(trgValue);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    /* Arbitrary deterministic data: coordinates in [0,1].
     * BUG FIX: `seed * seed` overflowed int (UB) once rank * nsrc + i
     * exceeded 46340; square in double instead — identical results wherever
     * the old code was defined, since the exact product fits in a double. */
#pragma omp parallel for
    for (int i = 0; i < nsrc; i++) {
        int seed = rank * nsrc + i;
        double seed2 = (double)seed * (double)seed;
        srcCoord[3 * i] = fabs(sin(seed));
        srcCoord[3 * i + 1] = fabs(cos(seed));
        srcCoord[3 * i + 2] = fabs(sin(seed2));
        srcValue[3 * i] = sin(seed);
        srcValue[3 * i + 1] = sin(sin(seed));
        srcValue[3 * i + 2] = cos(sin(seed));
    }
#pragma omp parallel for
    for (int i = 0; i < ntrg; i++) {
        int seed = rank * nsrc + i;
        double seed2 = (double)seed * (double)seed;
        trgCoord[3 * i] = fabs(cos(seed));
        trgCoord[3 * i + 1] = fabs(sin(seed));
        trgCoord[3 * i + 2] = fabs(cos(seed2));
        trgValue[3 * i] = 0;
        trgValue[3 * i + 1] = 0;
        trgValue[3 * i + 2] = 0;
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* FMM_Wrapper: Evaluate, clear, and Evaluate again. */
    {
        FMM_Wrapper *fmm = create_fmm_wrapper(12, 2000, 0, 7, 0);
        FMM_SetBox(fmm, 0, 1, 0, 1, 0, 1);
        FMM_UpdateTree(fmm, trgCoord, srcCoord, ntrg, nsrc);
        FMM_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
        FMM_DataClear(fmm);
        FMM_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
        delete_fmm_wrapper(fmm);
    }

    /* FMM_WrapperWall2D: Evaluate, clear, and Evaluate again.
     * z coordinates are scaled below the wall box height of 0.4999. */
    {
#pragma omp parallel for
        for (int i = 0; i < nsrc; i++) {
            srcCoord[3 * i + 2] *= 0.499;
        }
#pragma omp parallel for
        for (int i = 0; i < ntrg; i++) {
            trgCoord[3 * i + 2] *= 0.499;
        }
        FMM_WrapperWall2D *fmm = create_fmm_wrapperwall2d(12, 2000, 0, 4);
        FMMWall2D_SetBox(fmm, 0, 1, 0, 1, 0, 0.4999);
        FMMWall2D_UpdateTree(fmm, trgCoord, srcCoord, ntrg, nsrc);
        FMMWall2D_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
        FMMWall2D_DataClear(fmm);
        FMMWall2D_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
        delete_fmm_wrapperwall2d(fmm);
    }

    free(srcCoord);
    free(trgCoord);
    free(srcValue);
    free(trgValue);

    MPI_Finalize();
    return 0;
}
|
hci.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Alexander Sokolov <alexander.y.sokolov@gmail.com>
*
* Slater-Condon rule implementation for Heat-Bath CI
*/
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "hci.h"
#include <limits.h>
// Computes C' = H * C in the selected CI basis
// Computes C' = H * C in the selected-CI determinant basis using the
// Slater-Condon rules.  h1 is the one-electron integral matrix (norb x norb),
// eri the two-electron integrals in chemists' notation (norb^4, flat),
// strs the packed alpha/beta occupation bit-strings (2 * nset words per
// determinant, alpha first), civec the input CI vector, hdiag the precomputed
// diagonal elements, and ci1 the accumulated output vector (length ndet).
void contract_h_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1) {
  // ts[ip]: excitation level of determinant ip relative to determinant 0,
  // used to prescreen pairs (elements vanish beyond double excitations).
  // NOTE(review): malloc result is not checked.
  int *ts = malloc(sizeof(int) * ndet);
#pragma omp parallel
  {
    size_t ip, jp, p;
    int nset = (norb + 63) / 64;  // 64-bit words per spin string

    // Calculate excitation level for prescreening.
    // NOTE(review): ts[0] = 0 is written by every thread in the parallel
    // region; the stores are identical, but this is formally a data race.
    ts[0] = 0;
    uint64_t *str1a = strs;
    uint64_t *str1b = strs + nset;
#pragma omp for schedule(static)
    for (ip = 1; ip < ndet; ++ip) {
      uint64_t *stria = strs + ip * 2 * nset;
      uint64_t *strib = strs + ip * 2 * nset + nset;
      ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset));
    }
    // (implicit barrier here: ts[] is complete before the pair loop starts)

    // Loop over pairs of determinants; each thread owns disjoint rows ip, so
    // the ci1[ip] accumulations below are race-free.
#pragma omp for schedule(static)
    for (ip = 0; ip < ndet; ++ip) {
      for (jp = 0; jp < ndet; ++jp) {
        // Triangle-inequality prescreen: |ts[ip] - ts[jp]| >= 3 implies ip
        // and jp differ by more than a double excitation.
        if (abs(ts[ip] - ts[jp]) < 3) {
          uint64_t *stria = strs + ip * 2 * nset;
          uint64_t *strib = strs + ip * 2 * nset + nset;
          uint64_t *strja = strs + jp * 2 * nset;
          uint64_t *strjb = strs + jp * 2 * nset + nset;
          int n_excit_a = n_excitations(stria, strja, nset);
          int n_excit_b = n_excitations(strib, strjb, nset);
          // Diagonal term
          if (ip == jp) {
            ci1[ip] += hdiag[ip] * civec[ip];
          }
          // Single excitation: <I|H|I_i^a> = h_ai + sum_k (<ak|ik> - <ak|ki>)
          // over occupied k (exchange only for same spin).
          else if ((n_excit_a + n_excit_b) == 1) {
            int *ia;
            // alpha->alpha
            if (n_excit_b == 0) {
              ia = get_single_excitation(stria, strja, nset);
              int i = ia[0];
              int a = ia[1];
              double sign = compute_cre_des_sign(a, i, stria, nset);
              int *occsa = compute_occ_list(stria, nset, norb, neleca);
              int *occsb = compute_occ_list(strib, nset, norb, nelecb);
              double fai = h1[a * norb + i];
              // same-spin occupied: Coulomb minus exchange
              for (p = 0; p < neleca; ++p) {
                int k = occsa[p];
                int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
                int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
                fai += eri[kkai] - eri[kiak];
              }
              // opposite-spin occupied: Coulomb only
              for (p = 0; p < nelecb; ++p) {
                int k = occsb[p];
                int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
                fai += eri[kkai];
              }
              if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
              free(occsa);
              free(occsb);
            }
            // beta->beta
            else if (n_excit_a == 0) {
              ia = get_single_excitation(strib, strjb, nset);
              int i = ia[0];
              int a = ia[1];
              double sign = compute_cre_des_sign(a, i, strib, nset);
              int *occsa = compute_occ_list(stria, nset, norb, neleca);
              int *occsb = compute_occ_list(strib, nset, norb, nelecb);
              double fai = h1[a * norb + i];
              for (p = 0; p < nelecb; ++p) {
                int k = occsb[p];
                int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
                int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
                fai += eri[kkai] - eri[kiak];
              }
              for (p = 0; p < neleca; ++p) {
                int k = occsa[p];
                int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
                fai += eri[kkai];
              }
              if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
              free(occsa);
              free(occsb);
            }
            free(ia);
          }
          // Double excitation: antisymmetrized integral with a sign fixed by
          // the order the two cre/des pairs are applied.
          else if ((n_excit_a + n_excit_b) == 2) {
            int i, j, a, b;
            // alpha,alpha->alpha,alpha
            if (n_excit_b == 0) {
              int *ijab = get_double_excitation(stria, strja, nset);
              i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
              double v, sign;
              int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
              int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
              // Pairing (a,j)(b,i) vs (a,i)(b,j) depends on index ordering.
              if (a > j || i > b) {
                v = eri[ajbi] - eri[aibj];
                sign = compute_cre_des_sign(b, i, stria, nset);
                sign *= compute_cre_des_sign(a, j, stria, nset);
              }
              else {
                v = eri[aibj] - eri[ajbi];
                sign = compute_cre_des_sign(b, j, stria, nset);
                sign *= compute_cre_des_sign(a, i, stria, nset);
              }
              if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
              free(ijab);
            }
            // beta,beta->beta,beta
            else if (n_excit_a == 0) {
              int *ijab = get_double_excitation(strib, strjb, nset);
              i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
              double v, sign;
              int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
              int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
              if (a > j || i > b) {
                v = eri[ajbi] - eri[aibj];
                sign = compute_cre_des_sign(b, i, strib, nset);
                sign *= compute_cre_des_sign(a, j, strib, nset);
              }
              else {
                v = eri[aibj] - eri[ajbi];
                sign = compute_cre_des_sign(b, j, strib, nset);
                sign *= compute_cre_des_sign(a, i, strib, nset);
              }
              if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
              free(ijab);
            }
            // alpha,beta->alpha,beta: one single excitation per spin; no
            // exchange term between different spins.
            else {
              int *ia = get_single_excitation(stria, strja, nset);
              int *jb = get_single_excitation(strib, strjb, nset);
              i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
              double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j];
              double sign = compute_cre_des_sign(a, i, stria, nset);
              sign *= compute_cre_des_sign(b, j, strib, nset);
              if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
              free(ia);
              free(jb);
            }
          }
        } // end if over ts
      } // end loop over jp
    } // end loop over ip
  } // end omp
  free(ts);
}
// Compare two strings and compute excitation level
// Excitation level between two determinant bit-strings: half the number of
// differing occupation bits across all `nset` 64-bit words.
int n_excitations(uint64_t *str1, uint64_t *str2, int nset) {
  int diff_bits = 0;
  int w;
  for (w = 0; w < nset; ++w) {
    diff_bits += popcount(str1[w] ^ str2[w]);
  }
  // Each excitation moves one electron: one hole bit plus one particle bit.
  return diff_bits / 2;
}
// Compute number of set bits in a string
// Number of set bits in x (Kernighan's method: each iteration clears the
// lowest set bit, so the loop runs once per set bit).
int popcount(uint64_t x) {
  int count = 0;
  while (x) {
    x &= x - 1;
    ++count;
  }
  return count;
}
// Compute orbital indices for a single excitation
// Returns malloc'ed int[2] = {hole, particle} orbital indices for the single
// excitation connecting str1 -> str2.  Words are scanned from the last one
// (orbitals 0..63) upward; `64 * w` lifts a within-word bit position to a
// global orbital index.  Caller frees the result.
int *get_single_excitation(uint64_t *str1, uint64_t *str2, int nset) {
  int *ia = malloc(sizeof(int) * 2);
  size_t w;
  for (w = 0; w < (size_t)nset; ++w) {
    size_t widx = (size_t)nset - w - 1;            /* low orbitals first */
    uint64_t diff = str1[widx] ^ str2[widx];       /* bits that changed */
    uint64_t particle = diff & str2[widx];         /* occupied only in str2 */
    uint64_t hole = diff & str1[widx];             /* occupied only in str1 */
    if (popcount(particle) == 1) {
      ia[1] = trailz(particle) + 64 * (int)w;
    }
    if (popcount(hole) == 1) {
      ia[0] = trailz(hole) + 64 * (int)w;
    }
  }
  return ia;
}
// Compute orbital indices for a double excitation
// Returns malloc'ed int[4] = {i, j, a, b}: the two holes (i, j) and the two
// particles (a, b) of the double excitation connecting str1 -> str2.
// Words are scanned from the last one (orbitals 0..63) upward, so indices are
// filled in ascending orbital order; `64 * p` lifts a within-word bit
// position to a global orbital index.  Caller frees the returned array.
// NOTE(review): malloc result is unchecked, and the slots are only fully
// written when the strings really differ by exactly a double excitation.
int *get_double_excitation(uint64_t *str1, uint64_t *str2, int nset) {
  size_t p;
  int *ijab = malloc(sizeof(int) * 4);
  int particle_ind = 2;  /* next free particle slot (ijab[2..3]) */
  int hole_ind = 0;      /* next free hole slot (ijab[0..1]) */
  for (p = 0; p < nset; ++p) {
    size_t pp = nset - p - 1;  /* scan low orbitals first */
    uint64_t str_tmp = str1[pp] ^ str2[pp];       /* differing bits */
    uint64_t str_particle = str_tmp & str2[pp];   /* set only in str2 */
    uint64_t str_hole = str_tmp & str1[pp];       /* set only in str1 */
    int n_particle = popcount(str_particle);
    int n_hole = popcount(str_hole);
    if (n_particle == 1) {
      ijab[particle_ind] = trailz(str_particle) + 64 * p;
      particle_ind++;
    }
    else if (n_particle == 2) {
      /* both particles in this word: take the two lowest set bits */
      int a = trailz(str_particle);
      ijab[2] = a + 64 * p;
      str_particle &= ~(1ULL << a);  /* clear the first to find the second */
      int b = trailz(str_particle);
      ijab[3] = b + 64 * p;
    }
    if (n_hole == 1) {
      ijab[hole_ind] = trailz(str_hole) + 64 * p;
      hole_ind++;
    }
    else if (n_hole == 2) {
      int i = trailz(str_hole);
      ijab[0] = i + 64 * p;
      str_hole &= ~(1ULL << i);
      int j = trailz(str_hole);
      ijab[1] = j + 64 * p;
    }
  }
  return ijab;
}
// Compute number of trailing zeros in a bit string
// Number of trailing zero bits in v; returns 64 for v == 0.
//
// BUG FIX: the previous revision isolated the lowest set bit with
// `v &= -(int64_t) v`, which is undefined behavior when
// v == 0x8000000000000000 (negating INT64_MIN overflows).  Unsigned
// two's-complement negation (~v + 1) is well defined for every input.
int trailz(uint64_t v) {
  int c = 64;
  // Keep only the lowest set bit (or 0 if v == 0).
  v &= ~v + 1;
  if (v) c--;
  // Binary search on the isolated bit's position via half-word masks.
  if (v & 0x00000000ffffffff) c -= 32;
  if (v & 0x0000ffff0000ffff) c -= 16;
  if (v & 0x00ff00ff00ff00ff) c -= 8;
  if (v & 0x0f0f0f0f0f0f0f0f) c -= 4;
  if (v & 0x3333333333333333) c -= 2;
  if (v & 0x5555555555555555) c -= 1;
  return c;
}
// Function to print int as a char for debug purposes
// Debug helper: renders i as a 64-character '0'/'1' string, most significant
// bit first.  Returns a malloc'ed NUL-terminated string (caller frees), or
// NULL on allocation failure.
//
// Cleanup: the old code "type punned" i through `*(uint64_t *)&i` — a no-op,
// since the parameter is already uint64_t and unsigned right shift is fully
// defined; the cast has been removed.
char *int2bin(uint64_t i) {
  size_t bits = sizeof(uint64_t) * CHAR_BIT;
  char *str = malloc(bits + 1);
  if (!str) return NULL;
  str[bits] = '\0';
  // Fill from the end: each shift exposes the next-higher bit.
  for (; bits--; i >>= 1)
    str[bits] = (i & 1) ? '1' : '0';
  return str;
}
// Compute sign for a pair of creation and desctruction operators
/*
 * Fermionic sign (+1.0 / -1.0) from applying a creation operator at orbital p
 * and a destruction operator at orbital q to the determinant string `str`:
 * (-1)^nperm, where nperm counts the occupied orbitals strictly between q and
 * p.  Strings store orbitals 0..63 in str[nset-1], 64..127 in str[nset-2], ...
 *
 * BUG FIXES:
 *  - the cross-word partial terms previously added the *raw shifted word*
 *    (`str[...] >> (qb + 1)`) to nperm instead of its bit count, producing
 *    wrong signs whenever more than one bit survived the shift;
 *  - `x >> (qb + 1)` was undefined behavior when qb == 63 (shift by the full
 *    width); the shift is now split into two well-defined shifts.
 */
double compute_cre_des_sign(int p, int q, uint64_t *str, int nset) {
  int nperm = 0;
  size_t i;
  int pg = p / 64;  /* word group (counted from the low-orbital end) */
  int qg = q / 64;
  int pb = p % 64;  /* bit position inside the word */
  int qb = q % 64;
  if (pg > qg) {
    /* whole words strictly between the two partial words */
    for (i = nset - pg; i < nset - qg - 1; ++i) {
      nperm += popcount(str[i]);
    }
    /* bits below pb in p's word */
    nperm += popcount(str[nset - 1 - pg] & ((1ULL << pb) - 1));
    /* bits strictly above qb in q's word (two shifts avoid shifting by 64) */
    nperm += popcount((str[nset - 1 - qg] >> qb) >> 1);
  }
  else if (pg < qg) {
    for (i = nset - qg; i < nset - pg - 1; ++i) {
      nperm += popcount(str[i]);
    }
    nperm += popcount(str[nset - 1 - qg] & ((1ULL << qb) - 1));
    nperm += popcount((str[nset - 1 - pg] >> pb) >> 1);
  }
  else {
    /* same word: mask the bits strictly between q and p */
    uint64_t mask;
    if (p > q) mask = (1ULL << pb) - (1ULL << (qb + 1));
    else mask = (1ULL << qb) - (1ULL << (pb + 1));
    nperm = popcount(str[nset - 1 - pg] & mask);
  }
  return (nperm % 2) ? -1.0 : 1.0;
}
// Compute a list of occupied orbitals for a given string
// Returns a malloc'ed list of the occupied orbital indices (ascending) in
// `string`.  Words are scanned from the last one (orbitals 0..63) upward;
// the caller guarantees exactly `nelec` bits are set and frees the result.
int *compute_occ_list(uint64_t *string, int nset, int norb, int nelec) {
  int *occ = malloc(sizeof(int) * nelec);
  int base = 0;   /* orbital index of bit 0 in the current word */
  int count = 0;  /* next free slot in occ[] */
  int w;
  for (w = nset - 1; w >= 0; --w) {
    int nbits = (norb - base) < 64 ? (norb - base) : 64;
    int b;
    for (b = 0; b < nbits; ++b) {
      if ((string[w] >> b) & 1) {
        occ[count] = base + b;
        ++count;
      }
    }
    base += 64;
  }
  return occ;
}
// Compute a list of occupied orbitals for a given string
// Returns a malloc'ed list of the virtual (unoccupied) orbital indices
// (ascending) in `string`.  Mirror of compute_occ_list: words are scanned
// from the last one (orbitals 0..63) upward; the caller guarantees exactly
// norb - nelec bits are clear within the first norb orbitals and frees the
// result.
int *compute_vir_list(uint64_t *string, int nset, int norb, int nelec) {
  int *vir = malloc(sizeof(int) * (norb - nelec));
  int base = 0;   /* orbital index of bit 0 in the current word */
  int count = 0;  /* next free slot in vir[] */
  int w;
  for (w = nset - 1; w >= 0; --w) {
    int nbits = (norb - base) < 64 ? (norb - base) : 64;
    int b;
    for (b = 0; b < nbits; ++b) {
      if (!((string[w] >> b) & 1)) {
        vir[count] = base + b;
        ++count;
      }
    }
    base += 64;
  }
  return vir;
}
// Select determinants to include in the CI space
void select_strs(double *h1, double *eri, double *jk, uint64_t *eri_sorted, uint64_t *jk_sorted, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet_start, uint64_t ndet_finish, double select_cutoff, uint64_t *strs_add, uint64_t* strs_add_size) {
size_t p, q, r, i, k, a, ip, jp, kp, lp, ij, iset, idet;
uint64_t max_strs_add = strs_add_size[0];
int nset = (norb + 63) / 64;
// Compute Fock intermediates
double *focka = malloc(sizeof(double) * norb * norb);
double *fockb = malloc(sizeof(double) * norb * norb);
for (p = 0; p < norb; ++p) {
for (q = 0; q < norb; ++q) {
double vja = 0.0;
double vka = 0.0;
for (i = 0; i < neleca; ++i) {
size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q;
size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q;
vja += eri[iipq];
vka += eri[piiq];
}
double vjb = 0.0;
double vkb = 0.0;
for (i = 0; i < nelecb; ++i) {
size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q;
size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q;
vjb += eri[iipq];
vkb += eri[piiq];
}
focka[p * norb + q] = h1[p * norb + q] + vja + vjb - vka;
fockb[p * norb + q] = h1[p * norb + q] + vja + vjb - vkb;
}
}
int *holes_a = malloc(sizeof(int) * norb);
int *holes_b = malloc(sizeof(int) * norb);
int *particles_a = malloc(sizeof(int) * norb);
int *particles_b = malloc(sizeof(int) * norb);
uint64_t strs_added = 0;
// Loop over determinants
for (idet = ndet_start; idet < ndet_finish; ++idet) {
uint64_t *stra = strs + idet * 2 * nset;
uint64_t *strb = strs + idet * 2 * nset + nset;
int *occsa = compute_occ_list(stra, nset, norb, neleca);
int *occsb = compute_occ_list(strb, nset, norb, nelecb);
int *virsa = compute_vir_list(stra, nset, norb, neleca);
int *virsb = compute_vir_list(strb, nset, norb, nelecb);
double tol = select_cutoff / fabs(civec[idet]);
// Single excitations
int n_holes_a = 0;
int n_holes_b = 0;
int n_particles_a = 0;
int n_particles_b = 0;
for (p = 0; p < (norb - neleca); ++p) {
i = virsa[p];
if (i < neleca) {
holes_a[n_holes_a] = i;
n_holes_a++;
}
}
for (p = 0; p < neleca; ++p) {
i = occsa[p];
if (i >= neleca) {
particles_a[n_particles_a] = i;
n_particles_a++;
}
}
for (p = 0; p < (norb - nelecb); ++p) {
i = virsb[p];
if (i < nelecb) {
holes_b[n_holes_b] = i;
n_holes_b++;
}
}
for (p = 0; p < nelecb; ++p) {
i = occsb[p];
if (i >= nelecb) {
particles_b[n_particles_b] = i;
n_particles_b++;
}
}
// TODO: recompute Fock for each |Phi_I> and make sure it matches Fock in the code below
// alpha->alpha
for (p = 0; p < neleca; ++p) {
i = occsa[p];
for (q = 0; q < (norb - neleca); ++q) {
a = virsa[q];
double fai = focka[a * norb + i];
for (r = 0; r < n_particles_a; ++r) {
k = particles_a[r];
fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_a; ++r) {
k = holes_a[r];
fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_particles_b; ++r) {
k = particles_b[r];
fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_b; ++r) {
k = holes_b[r];
fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
if (fabs(fai) > tol) {
uint64_t *tmp = toggle_bit(stra, nset, a);
uint64_t *new_str = toggle_bit(tmp, nset, i);
for (iset = 0; iset < nset; ++iset) {
// new alpha string
strs_add[strs_added * 2 * nset + iset] = new_str[iset];
// old beta string
strs_add[strs_added * 2 * nset + nset + iset] = strb[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
}
// beta->beta
for (p = 0; p < nelecb; ++p) {
i = occsb[p];
for (q = 0; q < (norb - nelecb); ++q) {
a = virsb[q];
double fai = fockb[a * norb + i];
for (r = 0; r < n_particles_b; ++r) {
k = particles_b[r];
fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_b; ++r) {
k = holes_b[r];
fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_particles_a; ++r) {
k = particles_a[r];
fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
for (r = 0; r < n_holes_a; ++r) {
k = holes_a[r];
fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i];
}
if (fabs(fai) > tol) {
uint64_t *tmp = toggle_bit(strb, nset, a);
uint64_t *new_str = toggle_bit(tmp, nset, i);
for (iset = 0; iset < nset; ++iset) {
// old alpha string
strs_add[strs_added * 2 * nset + iset] = stra[iset];
// new beta string
strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
}
size_t ip_occ, jp_occ, kp_occ, lp_occ, ih;
// Double excitations
for (p = 0; p < norb * norb * norb * norb; ++p) {
ih = jk_sorted[p];
int aaaa_bbbb_done = (fabs(jk[ih]) < tol);
if (!aaaa_bbbb_done) {
lp = ih % norb;
ij = ih / norb;
kp = ij % norb;
ij = ij / norb;
jp = ij % norb;
ip = ij / norb;
// alpha,alpha->alpha,alpha
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < neleca; ++r) {
int occ_index = occsa[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(stra, nset, jp);
uint64_t *new_str = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(new_str, nset, lp);
new_str = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = new_str[iset];
strs_add[strs_added * 2 * nset + nset + iset] = strb[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
// beta,beta->beta,beta
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < nelecb; ++r) {
int occ_index = occsb[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(strb, nset, jp);
uint64_t *new_str = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(new_str, nset, lp);
new_str = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = stra[iset];
strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
}
free(tmp);
free(new_str);
strs_added++;
}
}
// alpha,beta->alpha,beta
ih = eri_sorted[p];
int aabb_done = (fabs(eri[ih]) < tol);
if (!aabb_done) {
lp = ih % norb;
ij = ih / norb;
kp = ij % norb;
ij = ij / norb;
jp = ij % norb;
ip = ij / norb;
ip_occ = 0;
jp_occ = 0;
kp_occ = 0;
lp_occ = 0;
for (r = 0; r < neleca; ++r) {
int occ_index = occsa[r];
if (ip == occ_index) ip_occ = 1;
if (jp == occ_index) jp_occ = 1;
}
for (r = 0; r < nelecb; ++r) {
int occ_index = occsb[r];
if (kp == occ_index) kp_occ = 1;
if (lp == occ_index) lp_occ = 1;
}
if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
uint64_t *tmp = toggle_bit(stra, nset, jp);
uint64_t *new_str_a = toggle_bit(tmp, nset, ip);
tmp = toggle_bit(strb, nset, lp);
uint64_t *new_str_b = toggle_bit(tmp, nset, kp);
for (iset = 0; iset < nset; ++iset) {
strs_add[strs_added * 2 * nset + iset] = new_str_a[iset];
strs_add[strs_added * 2 * nset + nset + iset] = new_str_b[iset];
}
free(tmp);
free(new_str_a);
free(new_str_b);
strs_added++;
}
}
// Break statement
if (aaaa_bbbb_done && aabb_done) {
break;
}
}
free(occsa);
free(occsb);
free(virsa);
free(virsb);
if (strs_added > max_strs_add) {
printf("\nError: Number of selected strings is greater than the size of the buffer array (%ld vs %ld).\n", strs_added, max_strs_add);
exit(EXIT_FAILURE);
}
} // end loop over determinants
free(focka);
free(fockb);
free(holes_a);
free(holes_b);
free(particles_a);
free(particles_b);
strs_add_size[0] = strs_added;
}
// Return a freshly allocated copy of the bit string with orbital p flipped.
// Words are stored most-significant-first: word (nset - 1 - p/64) holds
// bit p%64. The caller owns (and must free) the returned buffer.
uint64_t *toggle_bit(uint64_t *str, int nset, int p) {
uint64_t *out = malloc(sizeof(uint64_t) * nset);
for (int w = 0; w < nset; ++w)
out[w] = str[w];
out[nset - 1 - p / 64] ^= 1ULL << (p % 64);
return out;
}
// Lexicographic comparison of two bit strings of nset 64-bit words.
// Returns 1 if strs_i > strs_j, -1 if strs_i < strs_j, 0 if equal.
int order(uint64_t *strs_i, uint64_t *strs_j, int nset) {
for (int w = 0; w < nset; ++w) {
if (strs_i[w] != strs_j[w])
return strs_i[w] > strs_j[w] ? 1 : -1;
}
return 0;
}
// Recursive quick sort of string array indices.
// Sorts idx[0..nstrs_[0]-1] (indices into strs, stride nset words per
// string) into new_idx in increasing lexicographic order of the strings.
// IMPORTANT: duplicates are removed as a side effect — entries comparing
// equal to the pivot (other than the pivot itself) are silently dropped,
// and nstrs_[0] is updated in place to the deduplicated count.
void qsort_idx(uint64_t *strs, uint64_t *idx, uint64_t *nstrs_, int nset, uint64_t *new_idx) {
size_t p;
uint64_t nstrs = nstrs_[0];
if (nstrs <= 1) {
// Base case: zero or one element is already sorted and unique.
for (p = 0; p < nstrs; ++p) new_idx[p] = idx[p];
}
else {
// Pivot: last index in the current partition.
uint64_t ref = idx[nstrs - 1];
uint64_t *group_lt = malloc(sizeof(uint64_t) * nstrs);
uint64_t *group_gt = malloc(sizeof(uint64_t) * nstrs);
uint64_t group_lt_nstrs = 0;
uint64_t group_gt_nstrs = 0;
for (p = 0; p < (nstrs - 1); ++p) {
uint64_t i = idx[p];
uint64_t *stri = strs + i * nset;
uint64_t *strj = strs + ref * nset;
int c = order(stri, strj, nset);
// c == 0 (duplicate of the pivot) is intentionally discarded.
if (c == -1) {
group_lt[group_lt_nstrs] = i;
group_lt_nstrs++;
}
else if (c == 1) {
group_gt[group_gt_nstrs] = i;
group_gt_nstrs++;
}
}
uint64_t *new_idx_lt = malloc(sizeof(uint64_t) * group_lt_nstrs);
uint64_t *new_idx_gt = malloc(sizeof(uint64_t) * group_gt_nstrs);
qsort_idx(strs, group_lt, &group_lt_nstrs, nset, new_idx_lt);
qsort_idx(strs, group_gt, &group_gt_nstrs, nset, new_idx_gt);
// Recursion may have shrunk the partitions further; report the new total.
nstrs = group_lt_nstrs + group_gt_nstrs + 1;
nstrs_[0] = nstrs;
// Concatenate: sorted-less, pivot, sorted-greater.
for (p = 0; p < nstrs; ++p) {
if (p < group_lt_nstrs) new_idx[p] = new_idx_lt[p];
else if (p == group_lt_nstrs) new_idx[p] = ref;
else new_idx[p] = new_idx_gt[p - group_lt_nstrs - 1];
}
free(new_idx_lt);
free(new_idx_gt);
free(group_lt);
free(group_gt);
}
}
// Sort-and-deduplicate driver around qsort_idx: on return sort_idx holds
// the indices of the unique strings in increasing lexicographic order and
// nstrs_[0] has been reduced to the number of unique strings.
// Note: here nset is the number of 64-bit words per string record (the
// stride between consecutive strings), NOT the total number of strings.
void argunique(uint64_t *strs, uint64_t *sort_idx, uint64_t *nstrs_, int nset) {
size_t p;
uint64_t *init_idx = malloc(sizeof(uint64_t) * nstrs_[0]);
// Start from the identity permutation.
for (p = 0; p < nstrs_[0]; ++p) init_idx[p] = p;
qsort_idx(strs, init_idx, nstrs_, nset, sort_idx);
free(init_idx);
}
// Computes C' = S2 * C in the selected CI basis.
// ci1 is accumulated into (+=), so the caller must pass it zero-initialized.
void contract_ss_c(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *ci1) {
int *ts = malloc(sizeof(int) * ndet);
#pragma omp parallel
{
size_t ip, jp, p, q;
int nset = (norb + 63) / 64;
// Calculate excitation level for prescreening.
// ts[ip] = excitation level of determinant ip relative to determinant 0.
// NOTE(review): ts[0] is assigned by every thread; all threads write the
// same value, but formally this is a data race — "#pragma omp single"
// would be cleaner. Verify against intent.
ts[0] = 0;
uint64_t *str1a = strs;
uint64_t *str1b = strs + nset;
#pragma omp for schedule(static)
for (ip = 1; ip < ndet; ++ip) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset));
}
// Loop over pairs of determinants. The implicit barrier at the end of
// the previous "omp for" guarantees ts[] is complete before it is read.
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
// Prescreen: only pairs differing by at most a double excitation
// (|delta ts| < 3) can couple through S^2.
if (abs(ts[ip] - ts[jp]) < 3) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
double apb = (double) (neleca + nelecb);
double amb = (double) (neleca - nelecb);
double prefactor = apb / 2.0 + amb * amb / 4.0;
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
// Subtract one per doubly occupied orbital.
for (p = 0; p < neleca; ++p) {
int pa = occsa[p];
for (q = 0; q < nelecb; ++q) {
int qb = occsb[q];
if (pa == qb) prefactor -= 1.0;
}
}
ci1[ip] += prefactor * civec[ip];
free(occsa);
free(occsb);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// alpha,beta->alpha,beta (one single in each spin channel)
if (n_excit_a == n_excit_b) {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
// Only the spin-exchange case, where the alpha and beta
// excitations swap the same orbital pair, contributes.
if (i == b && j == a) {
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
ci1[ip] -= sign * civec[jp];
}
free(ia);
free(jb);
}
}
} // end if over ts
} // end loop over jp
} // end loop over ip
} // end omp
free(ts);
}
// Computes C' = H * C and C'' = S2 * C simultaneously in the selected CI
// basis: ci1 accumulates H*C and ci2 accumulates S2*C. Both outputs are
// accumulated into (+=), so the caller must pass them zero-initialized.
// hdiag holds the precomputed diagonal elements <I|H|I>.
void contract_h_c_ss_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1, double *ci2) {
int *ts = malloc(sizeof(int) * ndet);
#pragma omp parallel
{
size_t ip, jp, p, q;
int nset = (norb + 63) / 64;
// Calculate excitation level for prescreening.
// NOTE(review): ts[0] is written by every thread (same value each time);
// formally a data race — "#pragma omp single" would be cleaner.
ts[0] = 0;
uint64_t *str1a = strs;
uint64_t *str1b = strs + nset;
#pragma omp for schedule(static)
for (ip = 1; ip < ndet; ++ip) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset));
}
// Loop over pairs of determinants; the implicit barrier of the previous
// "omp for" guarantees ts[] is complete before it is read here.
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
// Prescreen: H and S^2 couple determinants differing by at most
// a double excitation, so |delta ts| >= 3 pairs are skipped.
if (abs(ts[ip] - ts[jp]) < 3) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term
if (ip == jp) {
ci1[ip] += hdiag[ip] * civec[ip];
// S^2
double apb = (double) (neleca + nelecb);
double amb = (double) (neleca - nelecb);
double prefactor = apb / 2.0 + amb * amb / 4.0;
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
for (p = 0; p < neleca; ++p) {
int pa = occsa[p];
for (q = 0; q < nelecb; ++q) {
int qb = occsb[q];
if (pa == qb) prefactor -= 1.0;
}
}
ci2[ip] += prefactor * civec[ip];
free(occsa);
free(occsb);
}
// Single excitation
else if ((n_excit_a + n_excit_b) == 1) {
int *ia;
// alpha->alpha
if (n_excit_b == 0) {
ia = get_single_excitation(stria, strja, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
// Effective Fock element f_ai = h_ai + sum_k (J - K) terms.
// NOTE(review): these 4-index offsets are computed in int and
// overflow once norb^4 exceeds INT_MAX (norb >= 216) — confirm
// norb stays small, or widen to size_t.
double fai = h1[a * norb + i];
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
// beta->beta (mirror of the alpha case with spins swapped)
else if (n_excit_a == 0) {
ia = get_single_excitation(strib, strjb, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, strib, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
double fai = h1[a * norb + i];
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k;
fai += eri[kkai] - eri[kiak];
}
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i;
fai += eri[kkai];
}
if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp];
free(occsa);
free(occsb);
}
free(ia);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// alpha,alpha->alpha,alpha
if (n_excit_b == 0) {
int *ijab = get_double_excitation(stria, strja, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
// The orbital ordering decides which pairing of the two
// creation/annihilation operators applies (and the sign).
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, stria, nset);
sign *= compute_cre_des_sign(a, j, stria, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, stria, nset);
sign *= compute_cre_des_sign(a, i, stria, nset);
}
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// beta,beta->beta,beta
else if (n_excit_a == 0) {
int *ijab = get_double_excitation(strib, strjb, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double v, sign;
int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i;
int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j;
if (a > j || i > b) {
v = eri[ajbi] - eri[aibj];
sign = compute_cre_des_sign(b, i, strib, nset);
sign *= compute_cre_des_sign(a, j, strib, nset);
}
else {
v = eri[aibj] - eri[ajbi];
sign = compute_cre_des_sign(b, j, strib, nset);
sign *= compute_cre_des_sign(a, i, strib, nset);
}
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
free(ijab);
}
// alpha,beta->alpha,beta (one single in each spin channel)
else {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j];
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp];
// S^2: only the spin-exchange case contributes.
if (i == b && j == a) {
ci2[ip] -= sign * civec[jp];
}
free(ia);
free(jb);
}
}
} // end if over ts
} // end loop over jp
} // end loop over ip
} // end omp
free(ts);
}
// 2-RDM is sorted in physicists notation: gamma_pqsr=<\Phi|a_p^dag a_q^dag a_r a_s|\Phi>
// Accumulates the spin-resolved 1- and 2-RDMs over all determinant pairs.
// All five output arrays are accumulated into (+=), so the caller must
// pass them zero-initialized. Thread-private buffers are reduced into the
// shared outputs inside an "omp critical" section at the end.
// NOTE(review): 4-index offsets below are computed in int and overflow
// once norb^4 exceeds INT_MAX (norb >= 216) — confirm norb stays small.
void compute_rdm12s(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *rdm1a, double *rdm1b, double *rdm2aa, double *rdm2ab, double *rdm2bb) {
#pragma omp parallel
{
size_t ip, jp, p, q, r, s;
int nset = (norb + 63) / 64;
double ci_sq = 0.0;
// Per-thread accumulators, zeroed before use.
double *rdm1a_private = malloc(sizeof(double) * norb * norb);
double *rdm1b_private = malloc(sizeof(double) * norb * norb);
double *rdm2aa_private = malloc(sizeof(double) * norb * norb * norb * norb);
double *rdm2ab_private = malloc(sizeof(double) * norb * norb * norb * norb);
double *rdm2bb_private = malloc(sizeof(double) * norb * norb * norb * norb);
for (p = 0; p < norb * norb; ++p) {
rdm1a_private[p] = 0.0;
rdm1b_private[p] = 0.0;
}
for (p = 0; p < norb * norb * norb * norb; ++p) {
rdm2aa_private[p] = 0.0;
rdm2ab_private[p] = 0.0;
rdm2bb_private[p] = 0.0;
}
// Loop over pairs of determinants
#pragma omp for schedule(static)
for (ip = 0; ip < ndet; ++ip) {
for (jp = 0; jp < ndet; ++jp) {
uint64_t *stria = strs + ip * 2 * nset;
uint64_t *strib = strs + ip * 2 * nset + nset;
uint64_t *strja = strs + jp * 2 * nset;
uint64_t *strjb = strs + jp * 2 * nset + nset;
int n_excit_a = n_excitations(stria, strja, nset);
int n_excit_b = n_excitations(strib, strjb, nset);
// Diagonal term: |C_I|^2 times occupation patterns.
if (ip == jp) {
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
ci_sq = civec[ip] * civec[ip];
// Diagonal rdm1_aa
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kk = k * norb + k;
rdm1a_private[kk] += ci_sq;
}
// Diagonal rdm1_bb
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int kk = k * norb + k;
rdm1b_private[kk] += ci_sq;
}
// Diagonal rdm2_aaaa (direct minus exchange)
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
for (q = 0; q < neleca; ++q) {
int j = occsa[q];
int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j;
int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k;
rdm2aa_private[kjkj] += ci_sq;
rdm2aa_private[kjjk] -= ci_sq;
}
// Diagonal rdm2_abab (no exchange between opposite spins)
for (q = 0; q < nelecb; ++q) {
int j = occsb[q];
int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j;
rdm2ab_private[kjkj] += ci_sq;
}
}
// Diagonal rdm2_bbbb
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
for (q = 0; q < nelecb; ++q) {
int j = occsb[q];
int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j;
int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k;
rdm2bb_private[kjkj] += ci_sq;
rdm2bb_private[kjjk] -= ci_sq;
}
}
free(occsa);
free(occsb);
}
// Single excitation
else if ((n_excit_a + n_excit_b) == 1) {
int *ia;
// alpha->alpha
if (n_excit_b == 0) {
ia = get_single_excitation(stria, strja, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
ci_sq = sign * civec[ip] * civec[jp];
// rdm1_aa
rdm1a_private[a * norb + i] += ci_sq;
// rdm2_aaaa: four index permutations with alternating signs.
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int akik = a * norb * norb * norb + k * norb * norb + i * norb + k;
int akki = a * norb * norb * norb + k * norb * norb + k * norb + i;
int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i;
int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k;
rdm2aa_private[akik] += ci_sq;
rdm2aa_private[akki] -= ci_sq;
rdm2aa_private[kaik] -= ci_sq;
rdm2aa_private[kaki] += ci_sq;
}
// rdm2_abab
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int akik = a * norb * norb * norb + k * norb * norb + i * norb + k;
rdm2ab_private[akik] += ci_sq;
}
free(occsa);
free(occsb);
}
// beta->beta (mirror of the alpha case)
else if (n_excit_a == 0) {
ia = get_single_excitation(strib, strjb, nset);
int i = ia[0];
int a = ia[1];
double sign = compute_cre_des_sign(a, i, strib, nset);
int *occsa = compute_occ_list(stria, nset, norb, neleca);
int *occsb = compute_occ_list(strib, nset, norb, nelecb);
ci_sq = sign * civec[ip] * civec[jp];
// rdm1_bb
rdm1b_private[a * norb + i] += ci_sq;
// rdm2_bbbb
for (p = 0; p < nelecb; ++p) {
int k = occsb[p];
int akik = a * norb * norb * norb + k * norb * norb + i * norb + k;
int akki = a * norb * norb * norb + k * norb * norb + k * norb + i;
int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i;
int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k;
rdm2bb_private[akik] += ci_sq;
rdm2bb_private[akki] -= ci_sq;
rdm2bb_private[kaik] -= ci_sq;
rdm2bb_private[kaki] += ci_sq;
}
// rdm2_abab
for (p = 0; p < neleca; ++p) {
int k = occsa[p];
int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i;
rdm2ab_private[kaki] += ci_sq;
}
free(occsa);
free(occsb);
}
free(ia);
}
// Double excitation
else if ((n_excit_a + n_excit_b) == 2) {
int i, j, a, b;
// rdm2_aaaa
if (n_excit_b == 0) {
int *ijab = get_double_excitation(stria, strja, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
double sign;
int baij = b * norb * norb * norb + a * norb * norb + i * norb + j;
int baji = b * norb * norb * norb + a * norb * norb + j * norb + i;
int abij = a * norb * norb * norb + b * norb * norb + i * norb + j;
int abji = a * norb * norb * norb + b * norb * norb + j * norb + i;
// Orbital ordering decides the operator pairing and overall sign.
if (a > j || i > b) {
sign = compute_cre_des_sign(b, i, stria, nset);
sign *= compute_cre_des_sign(a, j, stria, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2aa_private[baij] += ci_sq;
rdm2aa_private[baji] -= ci_sq;
rdm2aa_private[abij] -= ci_sq;
rdm2aa_private[abji] += ci_sq;
}
else {
sign = compute_cre_des_sign(b, j, stria, nset);
sign *= compute_cre_des_sign(a, i, stria, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2aa_private[baij] -= ci_sq;
rdm2aa_private[baji] += ci_sq;
rdm2aa_private[abij] += ci_sq;
rdm2aa_private[abji] -= ci_sq;
}
free(ijab);
}
// rdm2_bbbb
else if (n_excit_a == 0) {
int *ijab = get_double_excitation(strib, strjb, nset);
i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3];
// NOTE(review): v is unused in this branch.
double v, sign;
int baij = b * norb * norb * norb + a * norb * norb + i * norb + j;
int baji = b * norb * norb * norb + a * norb * norb + j * norb + i;
int abij = a * norb * norb * norb + b * norb * norb + i * norb + j;
int abji = a * norb * norb * norb + b * norb * norb + j * norb + i;
if (a > j || i > b) {
sign = compute_cre_des_sign(b, i, strib, nset);
sign *= compute_cre_des_sign(a, j, strib, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2bb_private[baij] += ci_sq;
rdm2bb_private[baji] -= ci_sq;
rdm2bb_private[abij] -= ci_sq;
rdm2bb_private[abji] += ci_sq;
}
else {
sign = compute_cre_des_sign(b, j, strib, nset);
sign *= compute_cre_des_sign(a, i, strib, nset);
ci_sq = sign * civec[ip] * civec[jp];
rdm2bb_private[baij] -= ci_sq;
rdm2bb_private[baji] += ci_sq;
rdm2bb_private[abij] += ci_sq;
rdm2bb_private[abji] -= ci_sq;
}
free(ijab);
}
// rdm2_abab
else {
int *ia = get_single_excitation(stria, strja, nset);
int *jb = get_single_excitation(strib, strjb, nset);
i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1];
double sign = compute_cre_des_sign(a, i, stria, nset);
sign *= compute_cre_des_sign(b, j, strib, nset);
ci_sq = sign * civec[ip] * civec[jp];
int abij = a * norb * norb * norb + b * norb * norb + i * norb + j;
rdm2ab_private[abij] += ci_sq;
free(ia);
free(jb);
}
}
} // end loop over jp
} // end loop over ip
// Reduce thread-private accumulators into the shared output arrays.
#pragma omp critical
{
for (p = 0; p < norb * norb; ++p) {
rdm1a[p] += rdm1a_private[p];
rdm1b[p] += rdm1b_private[p];
}
for (p = 0; p < norb * norb * norb * norb; ++p) {
rdm2aa[p] += rdm2aa_private[p];
rdm2ab[p] += rdm2ab_private[p];
rdm2bb[p] += rdm2bb_private[p];
}
}
free(rdm1a_private);
free(rdm1b_private);
free(rdm2aa_private);
free(rdm2ab_private);
free(rdm2bb_private);
} // end omp
}
|
prospector.c | #define _POSIX_C_SOURCE 200112L
#include <math.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <dlfcn.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#define ABI __attribute__((sysv_abi)) // pin the SysV calling convention for JIT-called function pointers
#define countof(a) ((int)(sizeof(a) / sizeof(0[a]))) // element count of a true array (not a pointer)
/* xoroshiro128+ PRNG: returns the next 64-bit value and advances the
 * two-word state s in place.
 */
static uint64_t
xoroshiro128plus(uint64_t s[2])
{
    uint64_t a = s[0];
    uint64_t b = s[1];
    uint64_t out = a + b;
    b ^= a;
    s[0] = ((a << 24) | (a >> 40)) ^ b ^ (b << 16); /* rotl(a, 24) mixed with b */
    s[1] = (b << 37) | (b >> 27);                   /* rotl(b, 37) */
    return out;
}
enum hf_type {
/* 32 bits */
HF32_XOR, // x ^= const32
HF32_MUL, // x *= const32 (odd)
HF32_ADD, // x += const32
HF32_ROT, // x = (x << const5) | (x >> (32 - const5))
HF32_NOT, // x = ~x
HF32_XORL, // x ^= x << const5
HF32_XORR, // x ^= x >> const5
HF32_ADDL, // x += x << const5
HF32_SUBL, // x -= x << const5
/* 64 bits — same order as the 32-bit block; hf_gen relies on the +9 offset */
HF64_XOR, // x ^= const64
HF64_MUL, // x *= const64 (odd)
HF64_ADD, // x += const64
HF64_ROT, // x = (x << const6) | (x >> (64 - const6))
HF64_NOT, // x = ~x
HF64_XORL, // x ^= x << const6
HF64_XORR, // x ^= x >> const6
HF64_ADDL, // x += x << const6
HF64_SUBL, // x -= x << const6
};
/* Printable mnemonic for each hf_type, indexed by enum value. */
static const char hf_names[][8] = {
[HF32_XOR] = "32xor",
[HF32_MUL] = "32mul",
[HF32_ADD] = "32add",
[HF32_ROT] = "32rot",
[HF32_NOT] = "32not",
[HF32_XORL] = "32xorl",
[HF32_XORR] = "32xorr",
[HF32_ADDL] = "32addl",
[HF32_SUBL] = "32subl",
[HF64_XOR] = "64xor",
[HF64_MUL] = "64mul",
[HF64_ADD] = "64add",
[HF64_ROT] = "64rot",
[HF64_NOT] = "64not",
[HF64_XORL] = "64xorl",
[HF64_XORR] = "64xorr",
[HF64_ADDL] = "64addl",
[HF64_SUBL] = "64subl",
};
#define FOP_LOCKED (1 << 0) // constant is pinned; hf_randfunc will not re-roll it
/* One operation in a candidate hash pipeline. */
struct hf_op {
enum hf_type type; // which operation
uint64_t constant; // immediate operand or shift/rotate amount (0 for NOT)
int flags; // FOP_LOCKED
};
/* Randomize the constants of the given hash operation.
*/
static void
hf_randomize(struct hf_op *op, uint64_t s[2])
{
uint64_t r = xoroshiro128plus(s);
switch (op->type) {
case HF32_NOT:
case HF64_NOT:
op->constant = 0;
break;
case HF32_XOR:
case HF32_ADD:
op->constant = (uint32_t)r;
break;
case HF32_MUL:
op->constant = (uint32_t)r | 1;
break;
case HF32_ROT:
case HF32_XORL:
case HF32_XORR:
case HF32_ADDL:
case HF32_SUBL:
op->constant = 1 + r % 31;
break;
case HF64_XOR:
case HF64_ADD:
op->constant = r;
break;
case HF64_MUL:
op->constant = r | 1;
break;
case HF64_ROT:
case HF64_XORL:
case HF64_XORR:
case HF64_ADDL:
case HF64_SUBL:
op->constant = 1 + r % 63;
break;
}
}
#define F_U64 (1 << 0) // pick from the 64-bit half of the enum
#define F_TINY (1 << 1) // don't use big constants
/* Pick a random operation type (and constant) for *op.
 * F_TINY excludes the first three enum entries (XOR/MUL/ADD — the ops
 * carrying full-width immediate constants); F_U64 shifts the choice by 9
 * into the 64-bit half of hf_type.
 */
static void
hf_gen(struct hf_op *op, uint64_t s[2], int flags)
{
uint64_t r = xoroshiro128plus(s);
int min = flags & F_TINY ? 3 : 0;
op->type = (r % (9 - min)) + min + (flags & F_U64 ? 9 : 0);
hf_randomize(op, s);
}
/* Return 1 if these operations may be adjacent.
 * Two consecutive ops of the same constant-style type compose into a
 * single op of that type (e.g. two xors merge), so those pairs are
 * rejected; shift-combine ops always mix and are always allowed.
 */
static int
hf_type_valid(enum hf_type a, enum hf_type b)
{
    switch (a) {
    case HF32_XORL:
    case HF32_XORR:
    case HF32_ADDL:
    case HF32_SUBL:
    case HF64_XORL:
    case HF64_XORR:
    case HF64_ADDL:
    case HF64_SUBL:
        return 1;
    case HF32_NOT:
    case HF32_XOR:
    case HF32_MUL:
    case HF32_ADD:
    case HF32_ROT:
    case HF64_NOT:
    case HF64_XOR:
    case HF64_MUL:
    case HF64_ADD:
    case HF64_ROT:
        return a != b;
    }
    abort();
}
/* Fill ops[0..n-1] with a random pipeline in which every adjacent pair
 * of operations passes hf_type_valid.
 */
static void
hf_genfunc(struct hf_op *ops, int n, int flags, uint64_t s[2])
{
    for (int i = 0; i < n; i++) {
        if (i == 0) {
            hf_gen(&ops[0], s, flags);
            continue;
        }
        do
            hf_gen(&ops[i], s, flags);
        while (!hf_type_valid(ops[i - 1].type, ops[i].type));
    }
}
/* Randomize the parameters of the given functoin.
*/
static void
hf_randfunc(struct hf_op *ops, int n, uint64_t s[2])
{
for (int i = 0; i < n; i++)
if (!(ops[i].flags & FOP_LOCKED))
hf_randomize(ops + i, s);
}
/* Render one hash operation as a line of C source into buf.
 * buf must be large enough for the longest emitted line (callers use a
 * 64-byte buffer, which suffices for every case below).
 */
static void
hf_print(const struct hf_op *op, char *buf)
{
    unsigned long long c = op->constant;
    switch (op->type) {
    case HF32_NOT:
    case HF64_NOT:
        sprintf(buf, "x = ~x;");
        break;
    case HF32_XOR:
        sprintf(buf, "x ^= UINT32_C(0x%08llx);", c);
        break;
    case HF32_MUL:
        sprintf(buf, "x *= UINT32_C(0x%08llx);", c);
        break;
    case HF32_ADD:
        sprintf(buf, "x += UINT32_C(0x%08llx);", c);
        break;
    case HF32_ROT:
        /* 32 - c has type unsigned long long, so it must be printed with
         * %llu; the previous %lld was a format/argument type mismatch
         * (undefined behavior per C11 7.21.6.1). c is in [1,31]. */
        sprintf(buf, "x = (x << %llu) | (x >> %llu);", c, 32 - c);
        break;
    case HF32_XORL:
        sprintf(buf, "x ^= x << %llu;", c);
        break;
    case HF32_XORR:
        sprintf(buf, "x ^= x >> %llu;", c);
        break;
    case HF32_ADDL:
        sprintf(buf, "x += x << %llu;", c);
        break;
    case HF32_SUBL:
        sprintf(buf, "x -= x << %llu;", c);
        break;
    case HF64_XOR:
        sprintf(buf, "x ^= UINT64_C(0x%016llx);", c);
        break;
    case HF64_MUL:
        sprintf(buf, "x *= UINT64_C(0x%016llx);", c);
        break;
    case HF64_ADD:
        sprintf(buf, "x += UINT64_C(0x%016llx);", c);
        break;
    case HF64_ROT:
        /* Same %llu fix as HF32_ROT; c is in [1,63]. */
        sprintf(buf, "x = (x << %llu) | (x >> %llu);", c, 64 - c);
        break;
    case HF64_XORL:
        sprintf(buf, "x ^= x << %llu;", c);
        break;
    case HF64_XORR:
        sprintf(buf, "x ^= x >> %llu;", c);
        break;
    case HF64_ADDL:
        sprintf(buf, "x += x << %llu;", c);
        break;
    case HF64_SUBL:
        sprintf(buf, "x -= x << %llu;", c);
        break;
    }
}
/* Write the whole pipeline as a compilable C function to f. The first
 * op's width (32- vs 64-bit half of the enum) selects the signature.
 */
static void
hf_printfunc(const struct hf_op *ops, int n, FILE *f)
{
    const char *ty = ops[0].type <= HF32_SUBL ? "uint32_t" : "uint64_t";
    fprintf(f, "%s\nhash(%s x)\n{\n", ty, ty);
    char buf[64];
    for (int i = 0; i < n; i++) {
        hf_print(ops + i, buf);
        fprintf(f, " %s\n", buf);
    }
    fprintf(f, " return x;\n}\n");
}
/* Emit x86-64 (SysV ABI) machine code for the pipeline into buf and
 * return a pointer one past the last byte written. The hash value lives
 * in eax/rax; edi/rdi (the argument register) is reused as scratch.
 */
static unsigned char *
hf_compile(const struct hf_op *ops, int n, unsigned char *buf)
{
if (ops[0].type <= HF32_SUBL) {
/* mov eax, edi */
*buf++ = 0x89;
*buf++ = 0xf8;
} else {
/* mov rax, rdi */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xf8;
}
for (int i = 0; i < n; i++) {
switch (ops[i].type) {
case HF32_NOT:
/* not eax */
*buf++ = 0xf7;
*buf++ = 0xd0;
break;
case HF32_XOR:
/* xor eax, imm32 */
*buf++ = 0x35;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
break;
case HF32_MUL:
/* imul eax, eax, imm32 */
*buf++ = 0x69;
*buf++ = 0xc0;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
break;
case HF32_ADD:
/* add eax, imm32 */
*buf++ = 0x05;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
break;
case HF32_ROT:
/* rol eax, imm8 */
*buf++ = 0xc1;
*buf++ = 0xc0;
*buf++ = ops[i].constant;
break;
case HF32_XORL:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* xor eax, edi */
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF32_XORR:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shr edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xef;
*buf++ = ops[i].constant;
/* xor eax, edi */
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF32_ADDL:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* add eax, edi */
*buf++ = 0x01;
*buf++ = 0xf8;
break;
case HF32_SUBL:
/* mov edi, eax */
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl edi, imm8 */
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* sub eax, edi */
*buf++ = 0x29;
*buf++ = 0xf8;
break;
case HF64_NOT:
/* not rax */
*buf++ = 0x48;
*buf++ = 0xf7;
*buf++ = 0xd0;
break;
case HF64_XOR:
/* mov rdi, imm64 */
*buf++ = 0x48;
*buf++ = 0xbf;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
*buf++ = ops[i].constant >> 32;
*buf++ = ops[i].constant >> 40;
*buf++ = ops[i].constant >> 48;
*buf++ = ops[i].constant >> 56;
/* xor rax, rdi */
*buf++ = 0x48;
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF64_MUL:
/* mov rdi, imm64 */
*buf++ = 0x48;
*buf++ = 0xbf;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
*buf++ = ops[i].constant >> 32;
*buf++ = ops[i].constant >> 40;
*buf++ = ops[i].constant >> 48;
*buf++ = ops[i].constant >> 56;
/* imul rax, rdi */
*buf++ = 0x48;
*buf++ = 0x0f;
*buf++ = 0xaf;
*buf++ = 0xc7;
break;
case HF64_ADD:
/* mov rdi, imm64 */
*buf++ = 0x48;
*buf++ = 0xbf;
*buf++ = ops[i].constant >> 0;
*buf++ = ops[i].constant >> 8;
*buf++ = ops[i].constant >> 16;
*buf++ = ops[i].constant >> 24;
*buf++ = ops[i].constant >> 32;
*buf++ = ops[i].constant >> 40;
*buf++ = ops[i].constant >> 48;
*buf++ = ops[i].constant >> 56;
/* add rax, rdi */
*buf++ = 0x48;
*buf++ = 0x01;
*buf++ = 0xf8;
break;
case HF64_ROT:
/* rol rax, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xc0;
*buf++ = ops[i].constant;
break;
case HF64_XORL:
/* mov rdi, rax (comment previously said "mov edi, eax", but the
 * REX.W prefix 0x48 makes this the 64-bit form) */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* xor rax, rdi */
*buf++ = 0x48;
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF64_XORR:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shr rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xef;
*buf++ = ops[i].constant;
/* xor rax, rdi */
*buf++ = 0x48;
*buf++ = 0x31;
*buf++ = 0xf8;
break;
case HF64_ADDL:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* add rax, rdi */
*buf++ = 0x48;
*buf++ = 0x01;
*buf++ = 0xf8;
break;
case HF64_SUBL:
/* mov rdi, rax */
*buf++ = 0x48;
*buf++ = 0x89;
*buf++ = 0xc7;
/* shl rdi, imm8 */
*buf++ = 0x48;
*buf++ = 0xc1;
*buf++ = 0xe7;
*buf++ = ops[i].constant;
/* sub rax, rdi */
*buf++ = 0x48;
*buf++ = 0x29;
*buf++ = 0xf8;
break;
}
}
/* ret */
*buf++ = 0xc3;
return buf;
}
/* Allocate one page of read/write memory destined to hold JIT-compiled
 * code (made executable later by execbuf_lock). Mapped from /dev/zero
 * rather than MAP_ANONYMOUS for strict POSIX portability. Exits the
 * process on failure.
 */
static void *
execbuf_alloc(void)
{
    void *page;
    int fd = open("/dev/zero", O_RDWR);
    if (fd == -1)
        goto fail;
    page = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    close(fd);
    if (page == MAP_FAILED)
        goto fail;
    return page;
fail:
    fprintf(stderr, "prospector: %s\n", strerror(errno));
    exit(EXIT_FAILURE);
}
/* Whether the OS rejects writable+executable mappings; probed lazily by
 * the first execbuf_lock call. */
static enum {
WXR_UNKNOWN, WXR_ENABLED, WXR_DISABLED
} wxr_enabled = WXR_UNKNOWN;
/* Make the code buffer executable. The first call probes whether the OS
 * permits W+X mappings; if it does, the page simply stays RWX forever and
 * later lock/unlock calls become no-ops. */
static void
execbuf_lock(void *buf)
{
    if (wxr_enabled == WXR_UNKNOWN) {
        if (mprotect(buf, 4096, PROT_READ | PROT_WRITE | PROT_EXEC) == 0) {
            /* W^X not enforced: the page is now RWX, nothing more to do */
            wxr_enabled = WXR_DISABLED;
            return;
        }
        wxr_enabled = WXR_ENABLED;
    }
    if (wxr_enabled == WXR_ENABLED)
        mprotect(buf, 4096, PROT_READ | PROT_EXEC);
}
/* Make the code buffer writable again so it can be recompiled. Must only
 * be called after execbuf_lock() has run at least once (which sets the
 * wxr_enabled state); calling it earlier is a programming error. */
static void
execbuf_unlock(void *buf)
{
    if (wxr_enabled == WXR_UNKNOWN)
        abort();
    if (wxr_enabled == WXR_ENABLED)
        mprotect(buf, 4096, PROT_READ | PROT_WRITE);
    /* WXR_DISABLED: page is permanently RWX, nothing to do */
}
/* Higher quality is slower but has more consistent results.
 * log2 of the number of random samples used by estimate_bias32/64();
 * set by the -q option, which restricts it to the range 12..30. */
static int score_quality = 18;
/* Measures how each input bit affects each output bit. This measures
 * both bias and avalanche: over random samples, count how often flipping
 * input bit j flips output bit k, then report the RMS deviation from the
 * ideal 50% flip rate (scaled by 1000; lower is better).
 */
static double
estimate_bias32(uint32_t ABI (*f)(uint32_t), uint64_t rng[2])
{
    long samples = 1L << score_quality;
    long flips[32][32] = {{0}};
    for (long s = 0; s < samples; s++) {
        uint32_t input = xoroshiro128plus(rng);
        uint32_t base = f(input);
        for (int in = 0; in < 32; in++) {
            uint32_t delta = base ^ f(input ^ (UINT32_C(1) << in));
            for (int out = 0; out < 32; out++)
                flips[in][out] += (delta >> out) & 1;
        }
    }
    double sumsq = 0;
    for (int in = 0; in < 32; in++) {
        for (int out = 0; out < 32; out++) {
            /* FIXME: normalize this somehow */
            double dev = (flips[in][out] - samples / 2) / (samples / 2.0);
            sumsq += (dev * dev) / (32 * 32);
        }
    }
    return sqrt(sumsq) * 1000.0;
}
/* 64-bit counterpart of estimate_bias32(): Monte Carlo avalanche
 * measurement returning the scaled RMS deviation from a 50% output-bit
 * flip rate (lower is better). */
static double
estimate_bias64(uint64_t ABI (*f)(uint64_t), uint64_t rng[2])
{
    long samples = 1L << score_quality;
    long flips[64][64] = {{0}};
    for (long s = 0; s < samples; s++) {
        uint64_t input = xoroshiro128plus(rng);
        uint64_t base = f(input);
        for (int in = 0; in < 64; in++) {
            uint64_t delta = base ^ f(input ^ (UINT64_C(1) << in));
            for (int out = 0; out < 64; out++)
                flips[in][out] += (delta >> out) & 1;
        }
    }
    double sumsq = 0;
    for (int in = 0; in < 64; in++) {
        for (int out = 0; out < 64; out++) {
            /* FIXME: normalize this somehow */
            double dev = (flips[in][out] - samples / 2) / (samples / 2.0);
            sumsq += (dev * dev) / (64 * 64);
        }
    }
    return sqrt(sumsq) * 1000.0;
}
#define EXACT_SPLIT 32 // must be power of two
/* Exhaustive version of estimate_bias32(): visits every 32-bit input
 * (2^32 of them), so it is slow but fully deterministic. The input space
 * is split into EXACT_SPLIT chunks processed in parallel via OpenMP;
 * each thread accumulates into a private copy of the counters and merges
 * under a critical section. */
static double
exact_bias32(uint32_t ABI (*f)(uint32_t))
{
    long long bins[32][32] = {{0}};
    static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT;
    #pragma omp parallel for
    for (int i = 0; i < EXACT_SPLIT; i++) {
        long long b[32][32] = {{0}};  /* thread-private counters */
        for (uint64_t x = i * range; x < (i + 1) * range; x++) {
            uint32_t h0 = f(x);
            for (int j = 0; j < 32; j++) {
                uint32_t bit = UINT32_C(1) << j;
                uint32_t h1 = f(x ^ bit);
                uint32_t set = h0 ^ h1;
                for (int k = 0; k < 32; k++)
                    b[j][k] += (set >> k) & 1;
            }
        }
        /* merge this thread's counts into the shared totals */
        #pragma omp critical
        for (int j = 0; j < 32; j++)
            for (int k = 0; k < 32; k++)
                bins[j][k] += b[j][k];
    }
    double mean = 0.0;
    for (int j = 0; j < 32; j++) {
        for (int k = 0; k < 32; k++) {
            /* 2147483648 = 2^31, the ideal flip count over 2^32 inputs */
            double diff = (bins[j][k] - 2147483648L) / 2147483648.0;
            mean += (diff * diff) / (32 * 32);
        }
    }
    return sqrt(mean) * 1000.0;
}
/* Print the command-line synopsis and option summary to stream f
 * (stdout for -h, stderr for invalid invocations). */
static void
usage(FILE *f)
{
    fprintf(f, "usage: prospector "
        "[-E|L|S] [-4|-8] [-ehs] [-l lib] [-p pattern] [-r n:m] [-t x]\n");
    fprintf(f, " -4 Generate 32-bit hash functions (default)\n");
    fprintf(f, " -8 Generate 64-bit hash functions\n");
    fprintf(f, " -e Measure bias exactly (requires -E)\n");
    fprintf(f, " -h Print this help message\n");
    fprintf(f, " -l ./lib.so Load hash() from a shared object\n");
    fprintf(f, " -p pattern Search only a given pattern\n");
    fprintf(f, " -q n Score quality knob (12-30, default: 18)\n");
    fprintf(f, " -r n:m Use between n and m operations [3:6]\n");
    fprintf(f, " -s Don't use large constants\n");
    fprintf(f, " -t x Initial score threshold [10.0]\n");
    fprintf(f, " -E Single evaluation mode (requires -p or -l)\n");
    fprintf(f, " -S Hash function search mode (default)\n");
    fprintf(f, " -L Enumerate output mode (requires -p or -l)\n");
}
/* Parse the constant that follows ':' in a template token into
 * op->constant, and mark the operation FOP_LOCKED so the search loop
 * will not re-randomize it. Returns 1 if the operation type accepts an
 * operand, 0 otherwise (NOT, or an unrecognized type). */
static int
parse_operand(struct hf_op *op, char *buf)
{
    op->flags |= FOP_LOCKED;
    switch (op->type) {
    case HF32_NOT:
    case HF64_NOT:
        /* NOT takes no operand */
        return 0;
    case HF32_XOR:
    case HF32_MUL:
    case HF32_ADD:
    case HF32_ROT:
    case HF64_XOR:
    case HF64_MUL:
    case HF64_ADD:
    case HF64_ROT:
        /* full-width constant, given in hexadecimal */
        op->constant = strtoull(buf, 0, 16);
        return 1;
    case HF32_XORL:
    case HF32_XORR:
    case HF32_ADDL:
    case HF32_SUBL:
    case HF64_XORL:
    case HF64_XORR:
    case HF64_ADDL:
    case HF64_SUBL:
        /* small decimal shift count */
        op->constant = atoi(buf);
        return 1;
    }
    return 0;
}
/* Parse a comma-separated operation pattern (the -p argument), e.g.
 * "xor:e2d0d4cb,rot,mul", into ops[0..n-1]. Each token is an operation
 * name (matched against hf_names without its 2-character prefix),
 * optionally followed by ':' and a fixed operand. Returns the number of
 * operations parsed, or 0 on any error (unknown name, bad operand, or
 * more than n tokens). Note: strtok() modifies 'template' in place. */
static int
parse_template(struct hf_op *ops, int n, char *template, int flags)
{
    int c = 0;
    /* 64-bit op enums are laid out at a fixed offset from the 32-bit ones */
    int offset = flags & F_U64 ? HF64_XOR : 0;
    for (char *tok = strtok(template, ","); tok; tok = strtok(0, ",")) {
        if (c == n) return 0;
        int found = 0;
        /* split "name:operand" at the ':' (if present) */
        size_t operand = strcspn(tok, ":");
        int sep = tok[operand];
        tok[operand] = 0;
        ops[c].flags = 0;
        for (int i = 0; i < countof(hf_names); i++) {
            if (!strcmp(hf_names[i] + 2, tok)) {
                found = 1;
                ops[c].type = i + offset;
                break;
            }
        }
        if (!found)
            return 0;
        if (sep == ':' && !parse_operand(ops + c, tok + operand + 1))
            return 0;
        c++;
    }
    return c;
}
/* Load the symbol "hash" from the given shared object (-l option) and
 * return its address; the handle is deliberately never dlclose()d so the
 * returned function pointer stays valid for the life of the process.
 * Exits with a diagnostic on failure.
 *
 * Improvement: include dlerror() in the diagnostics so the user sees the
 * actual loader failure (missing file, unresolved symbol, wrong arch)
 * instead of only the file name. */
static void *
load_function(const char *so)
{
    void *handle = dlopen(so, RTLD_NOW);
    if (!handle) {
        const char *err = dlerror();
        fprintf(stderr, "prospector: could not load %s: %s\n",
                so, err ? err : "unknown error");
        exit(EXIT_FAILURE);
    }
    void *f = dlsym(handle, "hash");
    if (!f) {
        const char *err = dlerror();
        fprintf(stderr, "prospector: could not find 'hash' in %s: %s\n",
                so, err ? err : "unknown error");
        exit(EXIT_FAILURE);
    }
    return f;
}
/* Return the current wall-clock time in microseconds since the epoch. */
static uint64_t
uepoch(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (uint64_t)now.tv_sec * 1000000 + (uint64_t)now.tv_usec;
}
/* Entry point. Three modes:
 *   MODE_SEARCH (default, -S): randomly generate hash functions forever,
 *     JIT-compile and score each, printing every new best.
 *   MODE_EVAL (-E): score a single function (from -p or -l) and report
 *     its bias and speed.
 *   MODE_LIST (-L): print every input/output pair of a single function.
 *
 * Fixes: 'S' was missing from the getopt() optstring, so the documented
 * -S option was rejected (fell into the default/usage case) before ever
 * reaching its switch case; the fread() of /dev/urandom was unchecked. */
int
main(int argc, char **argv)
{
    int nops = 0;
    int min = 3;                 /* -r: minimum operation count */
    int max = 6;                 /* -r: maximum operation count */
    int flags = 0;               /* F_U64 / F_TINY generation flags */
    int use_exact = 0;           /* -e: exhaustive bias measurement */
    double best = 100.0;         /* -t: initial reporting threshold */
    char *dynamic = 0;           /* -l: shared object providing hash() */
    char *template = 0;          /* -p: fixed operation pattern */
    struct hf_op ops[32];
    void *buf = execbuf_alloc();
    uint64_t rng[2] = {0x2a2bc037b59ff989, 0x6d7db86fa2f632ca};
    enum {MODE_SEARCH, MODE_EVAL, MODE_LIST} mode = MODE_SEARCH;

    int option;
    while ((option = getopt(argc, argv, "48EehLl:q:r:sSt:p:")) != -1) {
        switch (option) {
        case '4':
            flags &= ~F_U64;
            break;
        case '8':
            flags |= F_U64;
            break;
        case 'E':
            mode = MODE_EVAL;
            break;
        case 'e':
            use_exact = 1;
            break;
        case 'h':
            usage(stdout);
            exit(EXIT_SUCCESS);
            break;
        case 'L':
            mode = MODE_LIST;
            break;
        case 'l':
            dynamic = optarg;
            break;
        case 'p':
            template = optarg;
            break;
        case 'r':
            if (sscanf(optarg, "%d:%d", &min, &max) != 2 ||
                min < 1 || max > countof(ops) || min > max) {
                fprintf(stderr, "prospector: invalid range (-r): %s\n",
                        optarg);
                exit(EXIT_FAILURE);
            }
            break;
        case 'q':
            score_quality = atoi(optarg);
            if (score_quality < 12 || score_quality > 30) {
                fprintf(stderr, "prospector: invalid quality: %s\n",
                        optarg);
                exit(EXIT_FAILURE);
            }
            break;
        case 'S':
            mode = MODE_SEARCH;
            break;
        case 's':
            flags |= F_TINY;
            break;
        case 't':
            best = strtod(optarg, 0);
            break;
        default:
            usage(stderr);
            exit(EXIT_FAILURE);
        }
    }

    /* Get a unique seed; best effort — keep the fixed seed above if the
     * device is missing or the read falls short. */
    FILE *urandom = fopen("/dev/urandom", "rb");
    if (urandom) {
        if (fread(rng, 1, sizeof(rng), urandom) != sizeof(rng)) {
            rng[0] = 0x2a2bc037b59ff989;
            rng[1] = 0x6d7db86fa2f632ca;
        }
        fclose(urandom);
    }

    if (template) {
        nops = parse_template(ops, countof(ops), template, flags);
        if (!nops) {
            fprintf(stderr, "prospector: invalid template\n");
            exit(EXIT_FAILURE);
        }
    }

    if (mode == MODE_EVAL) {
        /* Score one function and time it. */
        double bias;
        void *hashptr = 0;
        if (template) {
            hf_randfunc(ops, nops, rng);
            hf_compile(ops, nops, buf);
            execbuf_lock(buf);
            hashptr = buf;
        } else if (dynamic) {
            hashptr = load_function(dynamic);
        } else {
            fprintf(stderr, "prospector: must supply -p or -l\n");
            exit(EXIT_FAILURE);
        }
        uint64_t nhash;  /* total hash invocations, for the nsec/hash figure */
        uint64_t beg = uepoch();
        if (flags & F_U64) {
            uint64_t ABI (*hash)(uint64_t) = hashptr;
            if (use_exact)
                fputs("warning: no exact bias for 64-bit\n", stderr);
            bias = estimate_bias64(hash, rng);
            nhash = (1L << score_quality) * 33;
        } else {
            uint32_t ABI (*hash)(uint32_t) = hashptr;
            if (use_exact) {
                bias = exact_bias32(hash);
                nhash = (1LL << 32) * 33;
            } else {
                bias = estimate_bias32(hash, rng);
                nhash = (1L << score_quality) * 65;
            }
        }
        uint64_t end = uepoch();
        printf("bias = %.17g\n", bias);
        printf("speed = %.3f nsec / hash\n", (end - beg) * 1000.0 / nhash);
        return 0;
    }

    if (mode == MODE_LIST) {
        /* Enumerate the full input space, printing input/output pairs. */
        void *hashptr = 0;
        if (template) {
            hf_randfunc(ops, nops, rng);
            hf_compile(ops, nops, buf);
            execbuf_lock(buf);
            hashptr = buf;
        } else if (dynamic) {
            hashptr = load_function(dynamic);
        } else {
            fprintf(stderr, "prospector: must supply -p or -l\n");
            exit(EXIT_FAILURE);
        }
        if (flags & F_U64) {
            uint64_t ABI (*hash)(uint64_t) = hashptr;
            uint64_t i = 0;
            do  /* do/while so the wrap back to 0 terminates the loop */
                printf("%016llx %016llx\n",
                       (unsigned long long)i,
                       (unsigned long long)hash(i));
            while (++i);
        } else {
            uint32_t ABI (*hash)(uint32_t) = hashptr;
            uint32_t i = 0;
            do
                printf("%08lx %08lx\n",
                       (unsigned long)i,
                       (unsigned long)hash(i));
            while (++i);
        }
        return 0;
    }

    /* MODE_SEARCH: loop forever, keeping track of the best function. */
    for (;;) {
        /* Generate */
        if (template) {
            hf_randfunc(ops, nops, rng);
        } else {
            nops = min + xoroshiro128plus(rng) % (max - min + 1);
            hf_genfunc(ops, nops, flags, rng);
        }
        /* Evaluate */
        double score;
        hf_compile(ops, nops, buf);
        execbuf_lock(buf);
        if (flags & F_U64) {
            uint64_t ABI (*hash)(uint64_t) = (void *)buf;
            score = estimate_bias64(hash, rng);
        } else {
            uint32_t ABI (*hash)(uint32_t) = (void *)buf;
            score = estimate_bias32(hash, rng);
        }
        execbuf_unlock(buf);
        /* Compare */
        if (score < best) {
            printf("// score = %.17g\n", score);
            hf_printfunc(ops, nops, stdout);
            fflush(stdout);
            best = score;
        }
    }
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize *y (this mutates the caller's y, exactly as the original
       interface does) so that the usec subtraction cannot underflow. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* 1 if the difference x - y is negative, 0 otherwise. */
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs an order-1 3D 7-point stencil over a ping-pong
 * pair of Nz x Ny x Nx grids for Nt time steps, TESTS times, reporting
 * the per-run and minimum wall-clock time.
 *
 * Fixes:
 *  - Nx/Ny/Nz/Nt were read uninitialized (undefined behavior) when too
 *    few command-line arguments were given; sane defaults added.
 *  - min_tdiff used an undefined lowercase min(); the MIN macro defined
 *    at the top of this file is used instead.
 *  - The i/j/k == 0 boundary planes (and all of plane A[1]) were never
 *    initialized, yet the stencil reads them; both planes are now fully
 *    initialized. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;  /* defaults incl. halo */
    if (argc > 3) {
        Nx = atoi(argv[1])+2;  /* +2 for the boundary halo */
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Two time planes (ping-pong buffers) of a Nz x Ny x Nx grid. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 24;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize every cell of both planes, including the boundary
     * planes the stencil reads. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Freeing the arrays here perturbs timing measurements, so it is
    // intentionally skipped; the OS reclaims the memory at exit.
    return 0;
}
|
eavlSourceTopologyGatherMapOp.h | // Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_SOURCE_TOPOLOGY_GATHER_MAP_OP_H
#define EAVL_SOURCE_TOPOLOGY_GATHER_MAP_OP_H
#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#ifndef DOXYGEN
// CPU (OpenMP) implementation: for each dense destination index, look up
// the corresponding sparse source element via the index array, gather the
// element's topology, and apply the functor, storing into the output.
template <class CONN>
struct eavlSourceTopologyGatherMapOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN s_inputs, OUT outputs,
                     INDEX indices, F &functor)
    {
        // indices<0> maps dense output positions to sparse source positions
        int *sparseindices = get<0>(indices).array;
        int ids[MAX_LOCAL_TOPOLOGY_IDS]; // these are effectively our src indices
        #pragma omp parallel for private(ids)
        for (int denseindex = 0; denseindex < nitems; ++denseindex)
        {
            int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
            int nids;
            // fetch the element's shape type and its component ids
            int shapeType = conn.GetElementComponents(sparseindex, nids, ids);
            typename collecttype<OUT>::type out(collect(denseindex, outputs));
            out = functor(shapeType, nids, ids, s_inputs);
        }
    }
};
#if defined __CUDACC__
// CUDA kernel: grid-stride loop over the dense destination indices
// (numThreads = total threads in the grid); each thread gathers its
// sparse source element's topology and applies the functor.
template <class CONN, class F, class IN, class OUT, class INDEX>
__global__ void
eavlSourceTopologyGatherMapOp_kernel(int nitems, CONN conn,
                                     const IN s_inputs, OUT outputs,
                                     INDEX indices, F functor)
{
    int *sparseindices = get<0>(indices).array;
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    int ids[MAX_LOCAL_TOPOLOGY_IDS]; // per-thread scratch for component ids
    for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads)
    {
        int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
        int nids;
        int shapeType = conn.GetElementComponents(sparseindex, nids, ids);
        collect(denseindex, outputs) = functor(shapeType, nids, ids, s_inputs);
    }
}
// GPU dispatcher: launches the CUDA kernel with a fixed geometry of
// 32 blocks x 256 threads; the kernel's grid-stride loop handles any
// nitems regardless of the launch size.
template <class CONN>
struct eavlSourceTopologyGatherMapOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN s_inputs, OUT outputs,
                     INDEX indices, F &functor)
    {
        int numThreads = 256;
        dim3 threads(numThreads, 1, 1);
        dim3 blocks (32, 1, 1);
        eavlSourceTopologyGatherMapOp_kernel<<< blocks, threads >>>(nitems, conn,
                                                                    s_inputs, outputs,
                                                                    indices, functor);
        CUDA_CHECK_ERROR();
    }
};
#endif
#endif
// ****************************************************************************
// Class: eavlSourceTopologyGatherMapOp
//
// Purpose:
/// Map from one topological element in a mesh to another, with
/// input arrays on the source topology (at sparsely indexed locations as
/// specific by the index array) and with outputs on the destination
/// topology (and densely indexed locations 0 to n-1).
//
// Programmer: Jeremy Meredith
// Creation: August 1, 2013
//
// Modifications:
// ****************************************************************************
template <class IS, class O, class INDEX, class F>
class eavlSourceTopologyGatherMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;     // cell set defining the source topology
    eavlTopology topology;  // which topological mapping to traverse
    IS s_inputs;            // input arrays on the source topology (sparse)
    O outputs;              // output arrays on the destination topology (dense)
    INDEX indices;          // sparse index array selecting source locations
    F functor;              // per-element operation to apply
  public:
    eavlSourceTopologyGatherMapOp(eavlCellSet *c, eavlTopology t,
                                  IS is, O o, INDEX ind, F f)
        : cells(c), topology(t), s_inputs(is), outputs(o), indices(ind), functor(f)
    {
    }
    // Dispatch on the concrete cell-set type (explicit vs. all-structured)
    // and run the OpenMP host implementation.
    virtual void GoCPU()
    {
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            eavlOpDispatch<eavlSourceTopologyGatherMapOp_CPU<eavlExplicitConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlSourceTopologyGatherMapOp_CPU<eavlRegularConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
        }
        // NOTE(review): silently does nothing if the cell set is neither
        // explicit nor all-structured — presumably those are the only kinds.
    }
    // Same dispatch for the device; explicit connectivity arrays must be
    // migrated to the device before the launch and back afterwards.
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            conn.shapetype.NeedOnDevice();
            conn.connectivity.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();
            eavlOpDispatch<eavlSourceTopologyGatherMapOp_GPU<eavlExplicitConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
            conn.shapetype.NeedOnHost();
            conn.connectivity.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlSourceTopologyGatherMapOp_GPU<eavlRegularConnectivity> >(n, conn, s_inputs, outputs, indices, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};
// helper function for type deduction: constructs the operation on the heap
// without the caller having to spell out the four template arguments.
// Caller owns (and must delete) the returned object.
template <class IS, class O, class INDEX, class F>
eavlSourceTopologyGatherMapOp<IS,O,INDEX,F> *new_eavlSourceTopologyGatherMapOp(eavlCellSet *c, eavlTopology t,
                                                                               IS is, O o, INDEX indices, F f)
{
    return new eavlSourceTopologyGatherMapOp<IS,O,INDEX,F>(c,t,is,o,indices,f);
}
#endif
|
GB_bitmap_assign_A_whole_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_A_whole_template: traverse A for bitmap assignment into C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This template traverses over all the entries of the matrix A and operates on
// the corresponding entry in C(i,j), using the GB_AIJ_WORK macro. A can be
// hypersparse or sparse, not bitmap or full. It is not a scalar.
{
    //--------------------------------------------------------------------------
    // matrix assignment: slice the entries of A for each task
    //--------------------------------------------------------------------------
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    const int64_t avlen = A->vlen ;
    int A_ntasks, A_nthreads ;
    GB_SLICE_MATRIX (A, 8, chunk) ;
    //--------------------------------------------------------------------------
    // traverse of the entries of the matrix A
    //--------------------------------------------------------------------------
    int tid ;
    #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < A_ntasks ; tid++)
    {
        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_Aslice [tid] ;
        int64_t klast = klast_Aslice [tid] ;
        // task-local count of new entries; GB_AIJ_WORK presumably updates
        // task_cnvals — NOTE(review): confirm against the macro definition
        int64_t task_cnvals = 0 ;
        //----------------------------------------------------------------------
        // traverse over A (:,kfirst:klast)
        //----------------------------------------------------------------------
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            //------------------------------------------------------------------
            // find the part of A(:,k) for this task
            //------------------------------------------------------------------
            int64_t j = GBH (Ah, k) ;
            int64_t pA_start, pA_end ;
            GB_get_pA (&pA_start, &pA_end, tid, k, kfirst,
                klast, pstart_Aslice, Ap, avlen) ;
            //------------------------------------------------------------------
            // traverse over A(:,j), the kth vector of A
            //------------------------------------------------------------------
            // C is held by column: entry C(i,j) lives at position i + j*cvlen
            int64_t pC0 = j * cvlen ;   // first entry in C(:,j)
            for (int64_t pA = pA_start ; pA < pA_end ; pA++)
            {
                int64_t i = Ai [pA] ;
                int64_t pC = i + pC0 ;
                // operate on C(i,j) at pC, and A(i,j) at pA. The mask
                // can be accessed at pC if M is bitmap or full. A has any
                // sparsity format so only A(i,j) can be accessed at pA.
                GB_AIJ_WORK (pC, pA) ;
            }
        }
        cnvals += task_cnvals ;
    }
    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------
    GB_WERK_POP (A_ek_slicing, int64_t) ;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size and even
% shape, to a image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}
/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}
/* Update 'assign' in place with the smaller/larger of itself and 'value'.
 * Note: 'assign' is evaluated twice; do not pass an expression with
 * side effects. */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
  size_t
    i,
    result;

  result=1;
  for (i=2; i <= n; i++)
    result*=i;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/* Parse a user-supplied kernel-array string ("WxH[+X+Y][@><]:v,v,..." or
 * an old-style flat list of numbers forming an odd square) into a newly
 * allocated KernelInfo. Returns NULL on allocation failure or any parse
 * error (errors destroy the partially built kernel). Only the first
 * kernel definition (up to ';') is consumed. */
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *)NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists thorugh rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      /* NOTE(review): no bounds check that p-kernel_string fits in
         token[MaxTextExtent] — confirm callers cannot overflow this */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
                                        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
                                        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetMagickToken(p,&p,token);
        if (*token == ',')
          GetMagickToken(p,&p,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum = +MagickHuge;
  kernel->maximum = -MagickHuge;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetMagickToken(p,&p,token);
    if (*token == ',')
      GetMagickToken(p,&p,token);
    if ( LocaleCompare("nan",token) == 0
         || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      /* track the positive/negative sums and the min/max as we go */
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  GetMagickToken(p,&p,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we recieved at least one real (non-nan) value! */
  if ( kernel->minimum == MagickHuge )
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}
/* ParseKernelName() parses a 'named' built-in kernel specification such as
** "Gaussian:0x2" or "Disk:3.5;".  It returns a newly allocated KernelInfo
** (caller frees with DestroyKernelInfo()), or NULL if the leading token is
** not a recognised built-in kernel name.  Only the segment up to the next
** ';' (or end of string) is consumed as this kernel's arguments.
*/
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  size_t
    length;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetMagickToken(kernel_string,&p,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *)NULL);  /* not a valid named kernel */

  /* skip separators between the kernel name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* BUGFIX: the argument segment may be longer than the token buffer;
  ** clamp the copy length to avoid a stack buffer overflow.  */
  length = (size_t) (end-p);
  if (length >= MaxTextExtent)
    length = MaxTextExtent-1;
  memcpy(token, p, length);
  token[length] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 )   /* if no width then */
        args.rho = args.sigma;           /* then width = height */
      if ( args.rho < 1.0 )              /* if width too small */
        args.rho = 3;                    /* then width = 3 */
      if ( args.sigma < 1.0 )            /* if height too small */
        args.sigma = args.rho;           /* then height = width */
      if ( (flags & XValue) == 0 )       /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )            /* no distance scale */
        args.sigma = 100.0;                        /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )      /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1);  /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )     /* '%' flag */
        args.sigma *= QuantumRange/100.0;          /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )          /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 )  /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )     /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/* AcquireKernelInfo() converts a user supplied string into a (possibly
** multi-kernel) KernelInfo list.  A token starting with a letter is parsed
** as a named built-in kernel (ParseKernelName); anything else is parsed as
** a user defined kernel array (ParseKernelArray).  Multiple kernels are
** separated by ';'.
**
** Returns NULL (after freeing any kernels already collected) if any kernel
** in the list fails to parse.  The caller owns the returned list and must
** free it with DestroyKernelInfo().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  KernelInfo
    *kernel,
    *new_kernel;

  const char
    *p;

  size_t
    kernel_number;  /* 0-based index of the kernel being parsed (diagnostics) */

  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p = kernel_string;
  kernel = NULL;
  kernel_number = 0;
  while ( GetMagickToken(p,NULL,token), *token != '\0' ) {
    /* ignore extra or multiple ';' kernel separators */
    if ( *token != ';' ) {
      /* tokens starting with alpha is a Named kernel */
      /* BUGFIX: cast through unsigned char -- passing a negative char to
      ** isalpha() is undefined behavior (matches the isspace() usage
      ** elsewhere in this file).  */
      if (isalpha((int) ((unsigned char) *token)) != 0)
        new_kernel = ParseKernelName(p);
      else /* otherwise a user defined kernel array */
        new_kernel = ParseKernelArray(p);
      /* Error handling -- this is not proper error handling! */
      if ( new_kernel == (KernelInfo *) NULL ) {
        (void) FormatLocaleFile(stderr, "Failed to parse kernel number #%.20g\n",
          (double) kernel_number);
        if ( kernel != (KernelInfo *) NULL )
          kernel=DestroyKernelInfo(kernel);
        return((KernelInfo *) NULL);
      }
      /* initialise or append the kernel list */
      if ( kernel == (KernelInfo *) NULL )
        kernel = new_kernel;
      else
        LastKernelInfo(kernel)->next = new_kernel;
      /* BUGFIX: kernel_number was never incremented, so the failure
      ** diagnostic above always reported "#0".  */
      kernel_number++;
    }
    /* look for the next kernel in list */
    p = strchr(p, ';');
    if ( p == (char *) NULL )
      break;
    p++;
  }
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
%        severe clipping and aliasing may result.  If not given or set to 0 the
%        radius will be determined so as to produce the best minimal error
%        result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
%        "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
%        An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
%        Generate a discrete kernel using a 2 dimensional Pascal's Triangle
%        of values.  Used for special forms of image filters
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
%  The 3x3 kernels (most of these) can be circularly rotated in multiples of
%
% Laplacian:{type}
%        Discrete Laplacian Kernels, (without normalization)
%        Type 0 :  3x3 with center:8 surrounded by -1  (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
%          | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
%    WARNING: The above was laid out as per
%        http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
%    But rotated 90 degrees so direction is from left rather than the top.
%    I have yet to find any secondary confirmation of the above.  The only
%    other source found was actual source code at
%        http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
%    Neither paper defines the kernels in a way that looks logical or
%    correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
%       Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
%       Find end points of lines (for pruning a skeleton)
%       Two types of lines ends (default to both) can be searched for
%         Type 0: All line ends
%         Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
%       Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
%         Type 1: Traditional Skeleton kernel (4 connected skeleton)
%         Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
%         Type 3: Thinning skeleton based on a research paper by
%                 Dan S. Bloomberg  (Default Type)
%    ThinSE:type
%       A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
%       is a value of one to any neighbour, orthogonal or diagonal.  One way
%       of thinking of it is the number of squares a 'King' or 'Queen' in
%       chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
%       An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
%       Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
%    scale of 1 without losing any information.  However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *)NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* shallow copy of all structure fields */
  /*
    The shallow copy above shares the 'values' array and the 'next' list
    with the source kernel.  Clear both BEFORE any allocation so that an
    error path calling DestroyKernelInfo(new_kernel) cannot free memory
    that is still owned by the caller's kernel (double free / dangling
    pointer on allocation failure).
  */
  new_kernel->values=(double *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* deep copy of the kernel value array */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* also clone the rest of the kernel list, recursively */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /*
    Release a kernel list: walk the chain iteratively, freeing each
    node's value array and then the node itself.  Always returns NULL
    so callers can write: kernel=DestroyKernelInfo(kernel);
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* NOTE(review): dead code, excluded by "#if 0".  Intended to mirror a
** kernel horizontally (flop) by reversing each row in place and moving
** the origin x offset accordingly.  It does NOT compile as written:
** 'angle' on the last line is not declared anywhere in this function,
** and 'x < kernel->width/2' compares signed against unsigned.  Fix
** both before enabling. */
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
/* swap values pairwise from the two ends of every row */
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
/* mirror the origin about the vertical centre line */
kernel->x = kernel->width - kernel->x - 1;
/* NOTE(review): 'angle' is undefined here -- would not compile */
angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Expand a single kernel into a sequence of 4 mirrored kernels:
    original, 180-degree flip, 90-degree transpose, and its flop.
    Each stage clones the previous kernel, rotates the clone, and
    appends it to the kernel list.
  */
  KernelInfo
    *clone,
    *last;

  last = kernel;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;  /* allocation failed -- keep the un-expanded kernel */
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;  /* partial expansion; list so far is still valid */
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;
  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only works for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  /*
    Return MagickTrue when two kernels are equivalent: same geometry,
    same origin, and element-wise equal values (NaN matches only NaN;
    numeric values must agree to within MagickEpsilon).
  */
  register size_t
    n;

  /* geometry and origin must match exactly */
  if ((kernel1->width  != kernel2->width)  ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x      != kernel2->x)      ||
      (kernel1->y      != kernel2->y))
    return MagickFalse;
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    /* a NaN in one kernel must be matched by a NaN in the other */
    if ( IsNaN(kernel1->values[n]) != IsNaN(kernel2->values[n]) )
      return MagickFalse;
    /* numeric values must be equivalent (NaN-NaN compares false here) */
    if ( fabs(kernel1->values[n] - kernel2->values[n]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /* Append incrementally-rotated clones to the kernel list until one
  ** rotation reproduces the original kernel (the cycle has closed).
  */
  tail=kernel;
  for ( ; ; )
  {
    rotated=CloneKernelInfo(tail);
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  rotated=DestroyKernelInfo(rotated); /* kernel has repeated - junk the clone */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /* Reset the meta-data; minimum/maximum are assumed to include zero. */
  kernel->minimum=kernel->maximum=0.0;
  kernel->negative_range=kernel->positive_range=0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    /* Flush values that are effectively zero. */
    if (fabs(kernel->values[n]) < MagickEpsilon)
      kernel->values[n]=0.0;
    /* Accumulate the value into the appropriate signed range. */
    if (kernel->values[n] < 0)
      kernel->negative_range+=kernel->values[n];
    else
      kernel->positive_range+=kernel->values[n];
    Minimize(kernel->minimum,kernel->values[n]);
    Maximize(kernel->maximum,kernel->values[n]);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination, or -1 on error.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag  "Morphology/Image"

  CacheView
    *p_view,
    *q_view;

  register ssize_t
    i;

  size_t
    *changes,        /* per-thread count of pixels changed */
    changed,         /* total number of pixels changed */
    virt_width;      /* input row width including kernel overlap */

  ssize_t
    y,
    offx,
    offy;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(result_image != (Image *) NULL);
  assert(result_image->signature == MagickSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  status=MagickTrue;
  progress=0;
  p_view=AcquireVirtualCacheView(image,exception);
  q_view=AcquireAuthenticCacheView(result_image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
      /* kernel is used as is, without reflection */
      break;
    default:
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
  }

  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changes[i]=0;

  if ( method == ConvolveMorphology && kernel->width == 1 )
  { /* Special handling (for speed) of vertical (blur) kernels.
    ** This performs its handling in columns rather than in rows.
    ** This is only done for convolve as it is the only method that
    ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
    **
    ** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blur with normal row-by-row (below)
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.807u
    ** Using this column method
    **   time convert logo: -morphology Convolve Blur:0x10+90 null:
    **     0.620u
    **
    ** Anthony Thyssen, 14 June 2010
    */
    register ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_threads(image,result_image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      register const PixelPacket
        *restrict p;

      register const IndexPacket
        *restrict p_indexes;

      register PixelPacket
        *restrict q;

      register IndexPacket
        *restrict q_indexes;

      register ssize_t
        y;

      ssize_t
        r;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
        exception);
      q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      p_indexes=GetCacheViewVirtualIndexQueue(p_view);
      q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
      /* offset to origin in 'p'.  while 'q' points to it directly */
      r = offy;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        DoublePixelPacket
          result;

        register ssize_t
          v;

        register const double
          *restrict k;

        register const PixelPacket
          *restrict k_pixels;

        register const IndexPacket
          *restrict k_indexes;

        /* Copy input image to the output image for unused channels
         * This removes need for 'cloning' a new image every iteration
         */
        *q = p[r];
        if (image->colorspace == CMYKColorspace)
          SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+r));

        /* Set the bias of the weighted average output */
        result.red     =
        result.green   =
        result.blue    =
        result.opacity =
        result.index   = bias;

        /* Weighted Average of pixels using reflected kernel
        **
        ** NOTE for correct working of this operation for asymmetrical
        ** kernels, the kernel needs to be applied in its reflected form.
        ** That is its values need to be reversed.
        */
        k = &kernel->values[ kernel->height-1 ];
        k_pixels = p;
        k_indexes = p_indexes;
        if ( ((channel & SyncChannels) == 0 ) ||
             (image->matte == MagickFalse) )
          { /* No 'Sync' involved.
            ** Convolution is simple greyscale channel operation
            */
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              result.red     += (*k)*GetPixelRed(k_pixels);
              result.green   += (*k)*GetPixelGreen(k_pixels);
              result.blue    += (*k)*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += (*k)*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            if ((channel & RedChannel) != 0)
              SetPixelRed(q,ClampToQuantum(result.red));
            if ((channel & GreenChannel) != 0)
              SetPixelGreen(q,ClampToQuantum(result.green));
            if ((channel & BlueChannel) != 0)
              SetPixelBlue(q,ClampToQuantum(result.blue));
            if (((channel & OpacityChannel) != 0) &&
                (image->matte == MagickTrue))
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (((channel & IndexChannel) != 0) &&
                (image->colorspace == CMYKColorspace))
              SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
          }
        else
          { /* Channel 'Sync' Flag, and Alpha Channel enabled.
            ** Weight the color channels with Alpha Channel so that
            ** transparent pixels are not part of the results.
            */
            double
              gamma;  /* divisor, sum of color alpha weighting */

            MagickRealType
              alpha;  /* alpha weighting for colors : alpha  */

            size_t
              count;  /* alpha values collected, number kernel values */

            count=0;
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++) {
              if ( IsNaN(*k) ) continue;
              alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
              gamma += alpha;    /* normalize alpha weights only */
              count++;           /* number of alpha values collected */
              alpha*=(*k);       /* include kernel weighting now */
              result.red     += alpha*GetPixelRed(k_pixels);
              result.green   += alpha*GetPixelGreen(k_pixels);
              result.blue    += alpha*GetPixelBlue(k_pixels);
              result.opacity += (*k)*GetPixelOpacity(k_pixels);
              if ( image->colorspace == CMYKColorspace)
                result.index += alpha*(*k_indexes);
              k--;
              k_pixels++;
              k_indexes++;
            }
            /* Sync'ed channels, all channels are modified */
            gamma=(double) count/(fabs((double) gamma) < MagickEpsilon ?
              MagickEpsilon : gamma);
            SetPixelRed(q,ClampToQuantum(gamma*result.red));
            SetPixelGreen(q,ClampToQuantum(gamma*result.green));
            SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
            SetPixelOpacity(q,ClampToQuantum(result.opacity));
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
          }

        /* Count up changed pixels */
        if (   ( p[r].red != GetPixelRed(q))
            || ( p[r].green != GetPixelGreen(q))
            || ( p[r].blue != GetPixelBlue(q))
            || ( p[r].opacity != GetPixelOpacity(q))
            || ( image->colorspace == CMYKColorspace &&
                 GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+y) ) )
          changes[id]++;
        p++;
        q++;
      } /* y */
      if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
          /* FIX: progress extent is the column count - this loop iterates
          ** once per column, not per row.
          */
          proceed=SetImageProgress(image,MorphologyTag,progress++,
            image->columns);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    } /* x */
    result_image->type=image->type;
    q_view=DestroyCacheView(q_view);
    p_view=DestroyCacheView(p_view);
    for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
      changed+=changes[i];
    changes=(size_t *) RelinquishMagickMemory(changes);
    /* FIX: return -1 on failure (not 0) so callers can distinguish an
    ** error from convergence, matching the row-by-row path below.
    */
    return(status ? (ssize_t) changed : -1);
  }

  /*
  ** Normal handling of horizontal or rectangular kernels (row by row)
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,result_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    size_t
      r;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
      kernel->height, exception);
    q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    p_indexes=GetCacheViewVirtualIndexQueue(p_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
    /* offset to origin in 'p'.  while 'q' points to it directly */
    r = virt_width*offy + offx;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict k_pixels;

      register const IndexPacket
        *restrict k_indexes;

      DoublePixelPacket
        result,
        min,
        max;

      /* Copy input image to the output image for unused channels
       * This removes need for 'cloning' a new image every iteration
       */
      *q = p[r];
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+r));

      /* Defaults */
      min.red     =
      min.green   =
      min.blue    =
      min.opacity =
      min.index   = (double) QuantumRange;
      max.red     =
      max.green   =
      max.blue    =
      max.opacity =
      max.index   = 0.0;
      /* default result is the original pixel value */
      result.red     = (double) p[r].red;
      result.green   = (double) p[r].green;
      result.blue    = (double) p[r].blue;
      result.opacity = QuantumRange - (double) p[r].opacity;
      result.index   = 0.0;
      if ( image->colorspace == CMYKColorspace)
        result.index = (double) GetPixelIndex(p_indexes+r);

      switch (method) {
        case ConvolveMorphology:
          /* Set the bias of the weighted average output */
          result.red     =
          result.green   =
          result.blue    =
          result.opacity =
          result.index   = bias;
          break;
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          /* use a boolean flag indicating when first match found */
          result.red = 0.0;  /* result is not used otherwise */
          break;
        default:
          break;
      }

      switch ( method ) {
        case ConvolveMorphology:
          /* Weighted Average of pixels using reflected kernel
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values need to be reversed.
          **
          ** Correlation is actually the same as this but without reflecting
          ** the kernel, and thus 'lower-level' than Convolution.  However
          ** as Convolution is the more common method used, and it does not
          ** really cost us much in terms of processing to use a reflected
          ** kernel, so it is Convolution that is implemented.
          **
          ** Correlation will have its kernel reflected before calling
          ** this function to do a Convolve.
          **
          ** For more details of Correlation vs Convolution see
          **   http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          if ( ((channel & SyncChannels) == 0 ) ||
               (image->matte == MagickFalse) )
            { /* No 'Sync' involved.
              ** Convolution is simple greyscale channel operation
              */
              for (v=0; v < (ssize_t) kernel->height; v++) {
                for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                  if ( IsNaN(*k) ) continue;
                  result.red     += (*k)*k_pixels[u].red;
                  result.green   += (*k)*k_pixels[u].green;
                  result.blue    += (*k)*k_pixels[u].blue;
                  result.opacity += (*k)*k_pixels[u].opacity;
                  if ( image->colorspace == CMYKColorspace)
                    result.index += (*k)*GetPixelIndex(k_indexes+u);
                }
                k_pixels += virt_width;
                k_indexes += virt_width;
              }
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
              if (((channel & OpacityChannel) != 0) &&
                  (image->matte == MagickTrue))
                SetPixelOpacity(q,ClampToQuantum((MagickRealType)
                  result.opacity));
              if (((channel & IndexChannel) != 0) &&
                  (image->colorspace == CMYKColorspace))
                SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
            }
          else
            { /* Channel 'Sync' Flag, and Alpha Channel enabled.
              ** Weight the color channels with Alpha Channel so that
              ** transparent pixels are not part of the results.
              */
              double
                alpha,  /* alpha weighting for colors : alpha  */
                gamma;  /* divisor, sum of color alpha weighting */

              size_t
                count;  /* alpha values collected, number kernel values */

              count=0;
              gamma=0.0;
              for (v=0; v < (ssize_t) kernel->height; v++) {
                for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                  if ( IsNaN(*k) ) continue;
                  alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
                  gamma += alpha;      /* normalize alpha weights only */
                  count++;             /* number of alpha values collected */
                  alpha=alpha*(*k);    /* include kernel weighting now */
                  result.red     += alpha*k_pixels[u].red;
                  result.green   += alpha*k_pixels[u].green;
                  result.blue    += alpha*k_pixels[u].blue;
                  result.opacity += (*k)*k_pixels[u].opacity;
                  if ( image->colorspace == CMYKColorspace)
                    result.index += alpha*GetPixelIndex(k_indexes+u);
                }
                k_pixels += virt_width;
                k_indexes += virt_width;
              }
              /* Sync'ed channels, all channels are modified */
              gamma=(double) count/(fabs((double) gamma) < MagickEpsilon ?
                MagickEpsilon : gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType)
                (gamma*result.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType)
                (gamma*result.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType)
                (gamma*result.blue)));
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType)
                  (gamma*result.index)));
            }
          break;

        case ErodeMorphology:
          /* Minimum Value within kernel neighbourhood
          **
          ** NOTE that the kernel is not reflected for this operation!
          **
          ** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value, this is currently not done, due to
          ** the nature of the boolean kernels being used.
          */
          k = kernel->values;
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k++) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue;
              Minimize(min.red,     (double) k_pixels[u].red);
              Minimize(min.green,   (double) k_pixels[u].green);
              Minimize(min.blue,    (double) k_pixels[u].blue);
              Minimize(min.opacity,
                          QuantumRange-(double) k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(min.index,(double) GetPixelIndex(
                  k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case DilateMorphology:
          /* Maximum Value within kernel neighbourhood
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values need to be reversed.
          **
          ** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value, this is currently not done, due to
          ** the nature of the boolean kernels being used.
          **
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue;
              Maximize(max.red,     (double) k_pixels[u].red);
              Maximize(max.green,   (double) k_pixels[u].green);
              Maximize(max.blue,    (double) k_pixels[u].blue);
              Maximize(max.opacity,
                          QuantumRange-(double) k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Maximize(max.index, (double) GetPixelIndex(
                  k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case HitAndMissMorphology:
        case ThinningMorphology:
        case ThickenMorphology:
          /* Minimum of Foreground Pixel minus Maximum of Background Pixels
          **
          ** NOTE that the kernel is not reflected for this operation,
          ** and consists of both foreground and background pixel
          ** neighbourhoods, 0.0 for background, and 1.0 for foreground
          ** with either Nan or 0.5 values for don't care.
          **
          ** Note that this will never produce a meaningless negative
          ** result.  Such results can cause Thinning/Thicken to not work
          ** correctly when used against a greyscale image.
          */
          k = kernel->values;
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k++) {
              if ( IsNaN(*k) ) continue;
              if ( (*k) > 0.7 )
                { /* minimum of foreground pixels */
                  Minimize(min.red,     (double) k_pixels[u].red);
                  Minimize(min.green,   (double) k_pixels[u].green);
                  Minimize(min.blue,    (double) k_pixels[u].blue);
                  Minimize(min.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Minimize(min.index,(double) GetPixelIndex(
                      k_indexes+u));
                }
              else if ( (*k) < 0.3 )
                { /* maximum of background pixels */
                  Maximize(max.red,     (double) k_pixels[u].red);
                  Maximize(max.green,   (double) k_pixels[u].green);
                  Maximize(max.blue,    (double) k_pixels[u].blue);
                  Maximize(max.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Maximize(max.index, (double) GetPixelIndex(
                      k_indexes+u));
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* Pattern Match if difference is positive */
          min.red     -= max.red;     Maximize( min.red,     0.0 );
          min.green   -= max.green;   Maximize( min.green,   0.0 );
          min.blue    -= max.blue;    Maximize( min.blue,    0.0 );
          min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
          min.index   -= max.index;   Maximize( min.index,   0.0 );
          break;

        case ErodeIntensityMorphology:
          /* Select Pixel with Minimum Intensity within kernel neighbourhood
          **
          ** WARNING: the intensity test fails for CMYK and does not
          ** take into account the moderating effect of the alpha channel
          ** on the intensity.
          **
          ** NOTE that the kernel is not reflected for this operation!
          */
          k = kernel->values;
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k++) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue;
              if ( result.red == 0.0 ||
                   GetPixelIntensity(image,&(k_pixels[u])) <
                     GetPixelIntensity(result_image,q) ) {
                /* copy the whole pixel - no channel selection */
                *q = k_pixels[u];
                if ( result.red > 0.0 ) changes[id]++;
                result.red = 1.0;
              }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case DilateIntensityMorphology:
          /* Select Pixel with Maximum Intensity within kernel neighbourhood
          **
          ** WARNING: the intensity test fails for CMYK and does not
          ** take into account the moderating effect of the alpha channel
          ** on the intensity (yet).
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values need to be reversed.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
              if ( result.red == 0.0 ||
                   GetPixelIntensity(image,&(k_pixels[u])) >
                     GetPixelIntensity(result_image,q) ) {
                /* copy the whole pixel - no channel selection */
                *q = k_pixels[u];
                if ( result.red > 0.0 ) changes[id]++;
                result.red = 1.0;
              }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case IterativeDistanceMorphology:
          /* Work out an iterative distance from black edge of a white image
          ** shape.  Essentially white values are decreased to the smallest
          ** 'distance from edge' it can find.
          **
          ** It works by adding kernel values to the neighbourhood, and
          ** selecting the minimum value found.  The kernel is rotated
          ** before use, so kernel distances match resulting distances,
          ** when a user provided asymmetric kernel is applied.
          **
          ** This code is almost identical to True GrayScale Morphology
          ** but not quite.
          **
          ** GreyDilate:  Kernel values added, maximum value found.  Kernel
          ** is rotated before use.
          **
          ** GrayErode:  Kernel values subtracted and minimum value found.
          ** No kernel rotation used.
          **
          ** Note that the Iterative Distance method is essentially a
          ** GrayErode, but with negative kernel values, and kernel
          ** rotation applied.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(
                  k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case UndefinedMorphology:
        default:
          break; /* Do nothing */
      }

      /* Final mathematics of results (combine with original image?)
      **
      ** NOTE: Difference Morphology operators Edge* and *Hat could also
      ** be done here but works better with iteration as a image difference
      ** in the controlling function (below).  Thicken and Thinning however
      ** should be done here so they can be iterated correctly.
      */
      switch ( method ) {
        case HitAndMissMorphology:
        case ErodeMorphology:
          result = min;    /* minimum of neighbourhood */
          break;
        case DilateMorphology:
          result = max;    /* maximum of neighbourhood */
          break;
        case ThinningMorphology:
          /* subtract pattern match from original */
          result.red     -= min.red;
          result.green   -= min.green;
          result.blue    -= min.blue;
          result.opacity -= min.opacity;
          result.index   -= min.index;
          break;
        case ThickenMorphology:
          /* Add the pattern matches to the original */
          result.red     += min.red;
          result.green   += min.green;
          result.blue    += min.blue;
          result.opacity += min.opacity;
          result.index   += min.index;
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case UndefinedMorphology:
        case ConvolveMorphology:
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          break;  /* full pixel was directly assigned - not a channel method */
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if ((channel & OpacityChannel) != 0
              && image->matte == MagickTrue )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( p[r].opacity != GetPixelOpacity(q) )
          || ( image->colorspace == CMYKColorspace &&
               GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changes[id]++;
      p++;
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
        proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
changed,
virt_width;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Some methods (including convolve) needs use a reflected kernel.
* Adjust 'origin' offsets to loop though kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
/* kernel needs to used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireVirtualCacheView(image,exception);
auth_view=AcquireAuthenticCacheView(image,exception);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only top half of kernel is processed as we do a single pass downward
** through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, while coping the color
** values of the closest pixel.
**
** This is experimental, and realy the 'alpha' component should
** be completely separate 'masking' channel so that alpha can
** also be used as part of the results.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( p[r].opacity != GetPixelOpacity(q) )
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
/* Do the reversed pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the bottom half of the kernel will be processes as we
** up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
/* offset to origin in 'p'. while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, coping the closest color.
**
** This is experimental, and realy the 'alpha' component should
** be completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( p[r].opacity != GetPixelOpacity(q) )
|| ( image->colorspace == CMYKColorspace &&
GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low-level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
/*
** MorphologyApply() drives the low-level morphology primitives: it handles
** compound methods (open/close/smooth/edge/...), per-kernel iteration,
** multi-kernel lists (re-iterate or compose results), and the special
** 'direct' methods (Distance/Voronoi).  Fixes applied: the duplicated
** '(void) (void)' casts on the verbose FormatLocaleFile() calls reduced to
** a single cast.
**
** Returns a new image, or NULL on error (errors reported via 'exception').
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[80];
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *)NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
 * + which loop should use the given iterations
 * + how many primitives make up the compound morphology
 * + multi-kernel compose method to use (by default)
 */
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THRU */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primitive */
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special == MagickTrue )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
 * Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morphology methods
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem strange to convert a Correlation into a
** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose == MagickTrue ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose == MagickTrue ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose == MagickTrue && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
break;
case EdgeMorphology:
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose == MagickTrue ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user-supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
** MorphologyImageChannel() applies the user-level settings (kernel scaling,
** 'showkernel' display, multi-kernel compose override) and then invokes
** MorphologyApply() to perform the requested morphology method.
**
** Fix: if CloneKernelInfo() fails the previous code called
** DestroyKernelInfo() on the resulting NULL pointer (which asserts);
** now we simply return NULL.
**
** Returns a new image, or NULL on error.
*/
MagickExport Image *MorphologyImageChannel(const Image *image,
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
KernelInfo
*curr_kernel;   /* kernel actually used; may be a scaled clone */
CompositeOperator
compose;        /* multi-kernel result composition override */
Image
*morphology_image;
/* Apply Convolve/Correlate Normalization and Scaling Factors.
 * This is done BEFORE the ShowKernelInfo() function is called so that
 * users can see the results of the 'option:convolve:scale' option.
 */
curr_kernel = (KernelInfo *) kernel;
if ( method == ConvolveMorphology || method == CorrelateMorphology )
{
const char
*artifact;
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *)NULL ) {
/* clone the kernel so the caller's copy is not modified */
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);  /* clone failed - nothing to destroy */
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
/* display the (normalized) kernel via stderr */
if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
 * If 'Undefined' use the default method
 * If 'None' (default for 'Convolve') re-iterate previous result
 * Otherwise merge resulting images using compose method given.
 * Default for 'HitAndMiss' is 'Lighten'.
 */
{ const char
*artifact;
compose = UndefinedCompositeOp; /* use default for method */
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL)
compose = (CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,artifact);
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, image->bias, exception);
/* Cleanup and Exit - destroy the clone only, never the caller's kernel */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
/*
** MorphologyImage() is a convenience wrapper around
** MorphologyImageChannel() that operates on the default channel set.
** Returns a new image, or NULL on error.
*/
MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod
method, const ssize_t iterations,const KernelInfo *kernel, ExceptionInfo
*exception)
{
return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
kernel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* Rotate the given kernel -- and, recursively, every kernel after it in a
  ** multi-kernel list -- by 'angle' degrees.  Only orthogonal rotations are
  ** handled in general (via transpose/reflection), plus 45 degree steps for
  ** 3x3 kernels; the angle is reduced step by step until it is near zero.
  */

  /* angle the lower kernels first (recurse down the multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range 0 <= angle < 360 */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle: cycle the eight outer
          ** elements one position around the center element. */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            /* work with the origin offset relative to the center element */
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);   /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
        /* NOTE(review): perror() appends strerror(errno), which is unrelated
        ** here -- fprintf(stderr,...) may be more appropriate; confirm. */
    }

  /* Attempt a 90 degree rotation (transpose-based) */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);   /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);    /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { register size_t
              i,j,x,y;
            register double
              *k,t;
            k=kernel->values;
            /* four-way cyclic exchange of mirrored element positions */
            for( i=0, x=kernel->width-1; i<=x; i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);   /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }

  /* Attempt a 180 degree rotation */
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      double
        t;
      register double
        *k;
      size_t
        i,
        j;
      k=kernel->values;
      /* reverse the flat array of kernel values in place */
      for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;
      /* reflect the origin through the kernel center */
      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  /* Parse a "convolve:scale" style geometry string and apply it to the
  ** kernel: the first number (rho) scales/normalizes the kernel (any
  ** normalization flags are forwarded), the second number (sigma) blends
  ** in a scaled unity kernel.
  */
  GeometryInfo
    args;

  GeometryFlags
    flags;

  SetGeometryInfo(&args);
  flags=(GeometryFlags) ParseGeometry(geometry,&args);
#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
       flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* '%' flag: the arguments were given as percentages */
  if ( (flags & PercentValue) != 0 )
    {
      args.rho *= 0.01;
      args.sigma *= 0.01;
    }

  /* supply defaults for any missing arguments */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* first argument: scale/normalize the kernel itself */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* second argument (when given): blend in a scaled unity kernel */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  /* Scale (and optionally normalize) every value of the given kernel, and
  ** of each kernel following it in a multi-kernel list.
  **
  **   NormalizeValue           divide by the kernel sum (or, for a
  **                            zero-summing kernel, by its positive part)
  **   CorrelateNormalizeValue  scale positive and negative values
  **                            separately, forcing a normalized
  **                            zero-summing (correlation) kernel
  **
  ** The normalized values are then multiplied by 'scaling_factor'.  The
  ** cached positive/negative ranges and min/max are updated to match, and
  ** swapped when the scaling factor is negative (sign flip).
  */
  register ssize_t
    i;

  register double
    pos_scale,
    neg_scale;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive part only */
      pos_scale = kernel->positive_range;
  }

  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNaN(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;   /* BUG FIX: was '= 1', which destroyed the saved
                            * maximum and broke the max/min swap */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'showkernel' option request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
  /* Print a human-readable dump of the given kernel -- and of every kernel
  ** in its multi-kernel list -- to standard error: type, angle, geometry,
  ** value range, output range classification, and the full table of kernel
  ** values (with 'nan' marking unset elements).
  */
  const KernelInfo
    *k;

  size_t
    c, i, u, v;

  for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
    (void) FormatLocaleFile(stderr, "Kernel");
    /* only number the kernels when the list holds more than one */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, k->type) );
    if ( fabs(k->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", k->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
      k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), k->minimum,
          GetMagickPrecision(), k->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), k->negative_range,
          GetMagickPrecision(), k->positive_range);
    /* classify the kernel by the sum of its values */
    if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), k->positive_range+k->negative_range);
    /* print the kernel values, one row per line */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if ( IsNaN(k->values[i]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* Add a scaled 'Unity' kernel to the given (pre-scaled/normalized)
  ** kernel, and to every kernel in its multi-kernel list.  This simply
  ** bumps the origin element of each kernel by 'scale', blending that
  ** fraction of the original image into the convolution result.
  */
  KernelInfo
    *k;

  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);   /* refresh cached ranges/min/max */
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  /* Replace every special 'nan' value with 0.0, in this kernel and in
  ** every kernel following it in the multi-kernel list.  Typically used
  ** before handing the kernel to hardware (GPU) convolution code.
  */
  KernelInfo
    *k;

  register size_t
    i,
    count;

  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      count = k->width*k->height;
      for (i=0; i < count; i++)
        if ( IsNaN(k->values[i]) )
          k->values[i] = 0.0;
    }
}
|
GB_unop__abs_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_fp64_fp64)
// op(A') function: GB (_unop_tran__abs_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = fabs (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_fp64_fp64)
(
    double *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = fabs (Ax [p]) for all p, in parallel.  When the bitmap Ab is
    // present, entries with Ab [p] == 0 are skipped (not present in A).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // no bitmap: apply the operator to every one of the anz entries
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__abs_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = fabs (A'): transpose, typecast, and apply the unary operator.
    // All of the work is done by the generic template included below, which
    // is specialized through the GB_* macros defined earlier in this file.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
jacobi-ompacc.c | // Naive version without any optimizations
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
double time_stamp()
{
  /* Return the current wall-clock time in seconds, with microsecond
   * resolution, using gettimeofday(). */
  struct timeval tv;

  gettimeofday(&tv, (struct timezone *)NULL);
  return (double)tv.tv_sec + 1.0e-6 * (double)tv.tv_usec;
}
double time1, time2;
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define REAL float // flexible between float and double
#define MSIZE 512
REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08; // depending on MSIZE!!
int n,m,mits;
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;
int main (void)
{
  /* Fixed problem configuration; the original interactive input of
   * n/m/tol/mits has been disabled so the run is reproducible and
   * matches the stored reference values. */
  n = MSIZE;
  m = MSIZE;
  tol = 0.0000000001;
  mits = 5000;

#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
#endif

  driver();
  return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialzed.
*
* Working varaibles/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver( )
{
  /* Allocate/initialize the arrays, run the Jacobi solver while timing
   * it, report the elapsed time, then verify the solution. */
  initialize();

  time1 = time_stamp();
  jacobi();                 /* Solve Helmholtz equation */
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n", time2 - time1);

  /* error_check (n,m,alpha,dx,dy,u,f) */
  error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
  /* Set the grid spacing (dx,dy), zero the solution array u, and fill the
   * right-hand side f assuming the exact solution u(x,y)=(1-x^2)*(1-y^2). */
  int i,j, xx,yy;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS */
  //#pragma omp parallel for private(xx,yy,j,i)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      /* NOTE(review): the (int) casts truncate the coordinates to integers
       * and the (i-1)/(j-1) offsets look off-by-one, but the reference
       * values (error_ref/resid_ref) were computed with exactly this code
       * -- do not "fix" without recomputing the references. */
      xx =(int)( -1.0 + dx * (i-1));
      yy = (int)(-1.0 + dy * (j-1)) ;
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)
        - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  /* Jacobi iteration for the Helmholtz equation on the global u/f arrays.
   * Iterates until the accumulated residual drops below tol or mits
   * iterations have run.  Each sweep is offloaded as two OpenMP target
   * regions (copy u->uold, then update u) because the device kernels need
   * a global synchronization between the two phases. */
  REAL omega;
  int i,j,k;
  REAL error,resid,ax,ay,b;

  omega=relax;

  /*
   * Initialize coefficients */
  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

  error = 10.0 * tol;   /* any value > tol, to enter the loop */
  k = 1;

  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;

    /* Copy new solution into old */
    // Must split the omp for into two parallel for regions since the translation focuses on parallel to generate the outlined kernel
    // We need two CUDA kernels for implementing global synchronization so we have to have two omp parallel directives!!
#pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
    for(i=0;i<n;i++)
      for(j=0;j<m;j++)
        uold[i][j] = u[i][j];

    /* Jacobi sweep over interior points: update u from uold and accumulate
     * the squared residual via the OpenMP reduction */
#pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
    for (i=1;i<(n-1);i++)
      for (j=1;j<(m-1);j++)
      {
        resid = (ax*(uold[i-1][j] + uold[i+1][j])
            + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;

        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid*resid ;
      }

    /* Error check */
    if (k%500==0)
      printf("Finished %d iteration with error =%f\n",k, error);
    error = sqrt(error)/(n*m);

    k = k + 1;
  } /* End iteration loop */

  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);
  printf("Residual_ref :%E\n", resid_ref);
  printf ("Diff ref=%E\n", fabs(error-resid_ref));
  /* verify against the stored reference residual for MSIZE */
  assert (fabs(error-resid_ref) < 1E-14);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
  /* Compare the computed solution u against the assumed exact solution
   * (1-x^2)*(1-y^2), print the normalized error, and assert that it
   * matches the stored reference value error_ref for MSIZE. */
  int i,j;
  REAL xx,yy,temp,error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0 ;

  //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      /* NOTE(review): the (i-1)/(j-1) offsets mirror initialize(); kept as
       * is for consistency with the reference error value. */
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
      error = error + temp*temp;
    }
  error = sqrt(error)/(n*m);

  printf("Solution Error :%E \n",error);
  printf("Solution Error Ref :%E \n",error_ref);
  printf ("Diff ref=%E\n", fabs(error-error_ref));
  assert (fabs(error-error_ref) < 1E-14);
}
|
mttkrp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include "hicoo.h"
int sptMTTKRPHiCOO_3D(
sptSparseTensorHiCOO const * const hitsr,
sptMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
int sptMTTKRPHiCOO_3D_Blocked(
sptSparseTensorHiCOO const * const hitsr,
sptMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
int sptMTTKRPHiCOO_3D_MatrixTiling(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
int sptMTTKRPHiCOO_4D_MatrixTiling(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
int sptMTTKRPHiCOO_3D_MatrixTiling_init(
sptSparseTensorHiCOO const * const hitsr,
sptMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode);
/**
* Matriced sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size
* ndims[mode] * R
* @param[in] hitsr the HiCOO sparse tensor input
* @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary
* @param[in] mats_order the order of the Khatri-Rao products
* @param[in] mode the mode on which the MTTKRP is performed
* @param[in] scratch an temporary array to store intermediate results, space assigned before this function
*
* This function uses support arbitrary-order sparse tensors with Khatri-Rao
* products of dense factor matrices, the output is the updated dense matrix for the "mode".
*/
int sptMTTKRPHiCOO(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    /* General N-mode MTTKRP over a HiCOO tensor: for each nonzero, form the
     * elementwise (Hadamard) product of the corresponding rows of all
     * factor matrices except mats[mode], scale by the tensor value, and
     * accumulate into the output row of mats[nmodes].  Returns 0 on
     * success; shape mismatches are reported via spt_CheckError. */
    sptIndex const nmodes = hitsr->nmodes;

    /* 3rd-order tensors use the specialized blocked kernel */
    if(nmodes == 3) {
        sptAssert(sptMTTKRPHiCOO_3D_Blocked(hitsr, mats, mats_order, mode) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;
    sptValueVector scratch; // Temporary array

    /* Check the mats: all factor matrices must share a column count with
     * the output matrix, and have row counts matching the tensor dims. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));   /* zero the output */
    sptNewValueVector(&scratch, R, R);

    /* NOTE(review): these malloc results are used unchecked -- consider
     * reporting allocation failure via the spt error mechanism. */
    sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord));
    sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord));

    /* Loop kernels */
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel */
        for(sptIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Block indices */
            for(sptIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices: global index = (block << sb_bits) + offset */
                for(sptIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];

                /* Multiply the 1st matrix: seed scratch with value * row */
                sptIndex times_mat_index = mats_order[1];
                sptMatrix * times_mat = mats[times_mat_index];
                sptIndex tmp_i = ele_coord[times_mat_index];
                sptValue const entry = vals[z];
                for(sptIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices (Hadamard product of rows) */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(sptIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }

                /* accumulate into the output row for this nonzero */
                sptIndex const mode_i = ele_coord[mode];
                for(sptIndex r=0; r<R; ++r) {
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    free(block_coord);
    free(ele_coord);
    sptFreeValueVector(&scratch);

    return 0;
}
/* Very slow version! Slower than COO in Morton order. */
/**
 * MTTKRP specialized for 3rd-order HiCOO tensors, without block-base-pointer
 * caching (see sptMTTKRPHiCOO_3D_Blocked for the faster variant).
 *
 * @param hitsr       input HiCOO sparse tensor (must have exactly 3 modes)
 * @param mats        factor matrices; mats[nmodes] is the output scratch matrix
 * @param mats_order  mode permutation of X; mats_order[0] is `mode`
 * @param mode        mode whose factor matrix is being updated
 * @return 0 on success; shape mismatches are reported via spt_CheckError
 */
int sptMTTKRPHiCOO_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],          // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats: every factor matrix must share ncols with the output
     * buffer, and have one row per index along its tensor mode. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* block_coord is reused, no need to store ele_coord for 3D tensors.
     * NOTE(review): malloc result is not checked, consistent with the rest
     * of this file; callers assume allocation succeeds. */
    sptBlockIndex * block_coord = (sptBlockIndex*)malloc(nmodes * sizeof(*block_coord));
    sptIndex mode_i;
    sptIndex tmp_i_1, tmp_i_2;
    sptValue entry;

    /* Loop kernels */
    for(sptIndex k = 0; k < hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * Counters are sptNnzIndex: the kptr/bptr bounds are sptNnzIndex, so
         * a narrower sptIndex counter would truncate for very large tensors. */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            /* Block indices */
            for(sptIndex m = 0; m < nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                /* Global row index = (block coordinate << sb_bits) + element offset. */
                mode_i = (block_coord[mode] << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                tmp_i_1 = (block_coord[times_mat_index_1] << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = (block_coord[times_mat_index_2] << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                entry = vals[z];
                for(sptIndex r = 0; r < R; ++r) {
                    mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels

    free(block_coord);
    return 0;
}
/**
 * MTTKRP for 3rd-order HiCOO tensors with per-block base pointers: the
 * block offset (binds << sb_bits) is applied once per block, so the inner
 * nonzero loop only adds the narrow element indices (einds).
 *
 * @param hitsr       input HiCOO sparse tensor (must have exactly 3 modes)
 * @param mats        factor matrices; mats[nmodes] is the output scratch matrix
 * @param mats_order  mode permutation of X; mats_order[0] is `mode`
 * @param mode        mode whose factor matrix is being updated
 * @return 0 on success; shape mismatches are reported via spt_CheckError
 */
int sptMTTKRPHiCOO_3D_Blocked(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],          // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptElementIndex mode_i;          // element offsets inside a block
    sptElementIndex tmp_i_1, tmp_i_2;
    sptValue entry;
    sptValue * restrict blocked_mvals;        // output tile base of current block
    sptValue * restrict blocked_times_mat_1;  // factor-1 tile base of current block
    sptValue * restrict blocked_times_mat_2;  // factor-2 tile base of current block

    /* Loop kernels */
    for(sptIndex k = 0; k < hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * Counters are sptNnzIndex: the kptr/bptr bounds are sptNnzIndex, so
         * a narrower sptIndex counter would truncate for very large tensors. */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                mode_i = hitsr->einds[mode].data[z];
                tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
                sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
                for(sptIndex r = 0; r < R; ++r) {
                    bmvals_row[r] += entry *
                        blocked_times_mat_1_row[r]
                        * blocked_times_mat_2_row[r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels

    return 0;
}
/**
 * Matrix-tiled MTTKRP on a HiCOO tensor of arbitrary order.
 * Dispatches to the specialized 3-D kernel when nmodes == 3; otherwise runs
 * the generic path, accumulating each nonzero's Hadamard product of factor
 * rows in `scratch` before adding it into the output row.
 *
 * @param hitsr       input HiCOO sparse tensor
 * @param mats        rank matrices; mats[nmodes] is the output scratch matrix
 * @param mats_order  mode permutation of X; mats_order[0] is `mode`
 * @param mode        mode whose factor matrix is being updated
 * @return 0 on success; shape mismatches are reported via spt_CheckError
 */
int sptMTTKRPHiCOO_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],      // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;

    /* Specialized 3rd-order kernel. */
    if(nmodes == 3) {
        sptAssert(sptMTTKRPHiCOO_3D_MatrixTiling(hitsr, mats, mats_order, mode) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;
    sptValueVector scratch; // Temporary array of length R (one Hadamard product).

    /* Check the mats. */
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));
    sptNewValueVector(&scratch, R, R);

    /* Per-mode base pointers into the factor tiles of the current block.
     * NOTE(review): malloc result is not checked, consistent with the rest
     * of this file; callers assume allocation succeeds. */
    sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));

    /* Loop kernels */
    for(sptIndex k = 0; k < hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * Counters are sptNnzIndex: the kptr/bptr bounds are sptNnzIndex, so
         * a narrower sptIndex counter would truncate for very large tensors. */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            /* Block indices: hoist each mode's tile base pointer. */
            for(sptIndex m = 0; m < nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r = 0; r < R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m = 2; m < nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }
                /* Accumulate into the output row of `mode`. */
                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                #pragma omp simd
                for(sptElementIndex r = 0; r < R; ++r) {
                    blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels

    free(blocked_times_mat);
    sptFreeValueVector(&scratch);
    return 0;
}
/**
 * Matrix-tiled MTTKRP specialized for 3rd-order HiCOO tensors.
 * Block base pointers are computed once per block; the inner nonzero loop
 * uses only narrow element offsets (einds) and a SIMD-vectorized rank loop.
 *
 * @param hitsr       input HiCOO sparse tensor (must have exactly 3 modes)
 * @param mats        rank matrices; mats[nmodes] is the output scratch matrix
 * @param mats_order  mode permutation of X; mats_order[0] is `mode`
 * @param mode        mode whose factor matrix is being updated
 * @return 0 on success; shape mismatches are reported via spt_CheckError
 */
int sptMTTKRPHiCOO_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],      // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptElementIndex mode_i;          // element offsets inside a block
    sptElementIndex tmp_i_1, tmp_i_2;
    sptValue entry;
    sptValue * restrict blocked_mvals;        // output tile base of current block
    sptValue * restrict blocked_times_mat_1;  // factor-1 tile base of current block
    sptValue * restrict blocked_times_mat_2;  // factor-2 tile base of current block

    /* Loop kernels */
    for(sptIndex k = 0; k < hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * Counters are sptNnzIndex: the kptr/bptr bounds are sptNnzIndex, so
         * a narrower sptIndex counter would truncate for very large tensors. */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                mode_i = hitsr->einds[mode].data[z];
                tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
                sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
                #pragma omp simd
                for(sptElementIndex r = 0; r < R; ++r) {
                    bmvals_row[r] += entry *
                        blocked_times_mat_1_row[r]
                        * blocked_times_mat_2_row[r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels

    return 0;
}
/**
 * Matrix-tiled MTTKRP specialized for 4th-order HiCOO tensors.
 * Same structure as the 3-D tiled kernel, with a third factor matrix in the
 * per-nonzero Hadamard product.
 *
 * @param hitsr       input HiCOO sparse tensor (must have exactly 4 modes)
 * @param mats        rank matrices; mats[nmodes] is the output scratch matrix
 * @param mats_order  mode permutation of X; mats_order[0] is `mode`
 * @param mode        mode whose factor matrix is being updated
 * @return 0 on success; shape mismatches are reported via spt_CheckError
 */
int sptMTTKRPHiCOO_4D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],      // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 4);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];
    sptIndex times_mat_index_3 = mats_order[3];
    sptRankMatrix * restrict times_mat_3 = mats[times_mat_index_3];

    sptElementIndex mode_i;          // element offsets inside a block
    sptElementIndex tmp_i_1, tmp_i_2, tmp_i_3;
    sptValue entry;
    sptValue * restrict blocked_mvals;        // output tile base of current block
    sptValue * restrict blocked_times_mat_1;  // factor-1 tile base of current block
    sptValue * restrict blocked_times_mat_2;  // factor-2 tile base of current block
    sptValue * restrict blocked_times_mat_3;  // factor-3 tile base of current block

    /* Loop kernels */
    for(sptIndex k = 0; k < hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * Counters are sptNnzIndex: the kptr/bptr bounds are sptNnzIndex, so
         * a narrower sptIndex counter would truncate for very large tensors. */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_3 = times_mat_3->values + (hitsr->binds[times_mat_index_3].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                mode_i = hitsr->einds[mode].data[z];
                tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                tmp_i_3 = hitsr->einds[times_mat_index_3].data[z];
                entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                sptValue * const restrict blocked_times_mat_1_row = blocked_times_mat_1 + tmp_i_1 * stride;
                sptValue * const restrict blocked_times_mat_2_row = blocked_times_mat_2 + tmp_i_2 * stride;
                sptValue * const restrict blocked_times_mat_3_row = blocked_times_mat_3 + tmp_i_3 * stride;
                #pragma omp simd
                for(sptElementIndex r = 0; r < R; ++r) {
                    bmvals_row[r] += entry *
                        blocked_times_mat_1_row[r]
                        * blocked_times_mat_2_row[r]
                        * blocked_times_mat_3_row[r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels

    return 0;
}
/**
 * Initial (non-SIMD, non-restrict-row) variant of the 3rd-order matrix-tiled
 * MTTKRP on HiCOO; kept as the straightforward reference implementation of
 * sptMTTKRPHiCOO_3D_MatrixTiling.
 *
 * @param hitsr       input HiCOO sparse tensor (must have exactly 3 modes)
 * @param mats        factor matrices; mats[nmodes] is the output scratch matrix
 * @param mats_order  mode permutation of X; mats_order[0] is `mode`
 * @param mode        mode whose factor matrix is being updated
 * @return 0 on success; shape mismatches are reported via spt_CheckError
 */
int sptMTTKRPHiCOO_3D_MatrixTiling_init(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[],          // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    sptElementIndex mode_i;          // element offsets inside a block
    sptElementIndex tmp_i_1, tmp_i_2;
    sptValue entry;
    sptValue * blocked_mvals;        // output tile base of current block
    sptValue * blocked_times_mat_1;  // factor-1 tile base of current block
    sptValue * blocked_times_mat_2;  // factor-2 tile base of current block

    /* Loop kernels */
    for(sptIndex k = 0; k < hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];

        /* Loop blocks in a kernel.
         * Counters are sptNnzIndex: the kptr/bptr bounds are sptNnzIndex, so
         * a narrower sptIndex counter would truncate for very large tensors. */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];

            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                mode_i = hitsr->einds[mode].data[z];
                tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                entry = vals[z];
                for(sptIndex r = 0; r < R; ++r) {
                    blocked_mvals[mode_i * stride + r] += entry *
                        blocked_times_mat_1[tmp_i_1 * stride + r] *
                        blocked_times_mat_2[tmp_i_2 * stride + r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels

    return 0;
}
|
heptane_3sp.c |
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(BL_FORT_USE_UPPERCASE)
#define CKINDX CKINDX
#define CKINIT CKINIT
#define CKFINALIZE CKFINALIZE
#define CKXNUM CKXNUM
#define CKSYME CKSYME
#define CKSYMS CKSYMS
#define CKRP CKRP
#define CKPX CKPX
#define CKPY CKPY
#define CKPC CKPC
#define CKRHOX CKRHOX
#define CKRHOY CKRHOY
#define CKRHOC CKRHOC
#define CKWT CKWT
#define CKAWT CKAWT
#define CKMMWY CKMMWY
#define CKMMWX CKMMWX
#define CKMMWC CKMMWC
#define CKYTX CKYTX
#define CKYTCP CKYTCP
#define CKYTCR CKYTCR
#define CKXTY CKXTY
#define CKXTCP CKXTCP
#define CKXTCR CKXTCR
#define CKCTX CKCTX
#define CKCTY CKCTY
#define CKCPOR CKCPOR
#define CKHORT CKHORT
#define CKSOR CKSOR
#define CKCVML CKCVML
#define CKCPML CKCPML
#define CKUML CKUML
#define CKHML CKHML
#define CKGML CKGML
#define CKAML CKAML
#define CKSML CKSML
#define CKCVMS CKCVMS
#define CKCPMS CKCPMS
#define CKUMS CKUMS
#define CKHMS CKHMS
#define CKGMS CKGMS
#define CKAMS CKAMS
#define CKSMS CKSMS
#define CKCPBL CKCPBL
#define CKCPBS CKCPBS
#define CKCVBL CKCVBL
#define CKCVBS CKCVBS
#define CKHBML CKHBML
#define CKHBMS CKHBMS
#define CKUBML CKUBML
#define CKUBMS CKUBMS
#define CKSBML CKSBML
#define CKSBMS CKSBMS
#define CKGBML CKGBML
#define CKGBMS CKGBMS
#define CKABML CKABML
#define CKABMS CKABMS
#define CKWC CKWC
#define CKWYP CKWYP
#define CKWXP CKWXP
#define CKWYR CKWYR
#define CKWXR CKWXR
#define CKQC CKQC
#define CKKFKR CKKFKR
#define CKQYP CKQYP
#define CKQXP CKQXP
#define CKQYR CKQYR
#define CKQXR CKQXR
#define CKNU CKNU
#define CKNCF CKNCF
#define CKABE CKABE
#define CKEQC CKEQC
#define CKEQYP CKEQYP
#define CKEQXP CKEQXP
#define CKEQYR CKEQYR
#define CKEQXR CKEQXR
#define DWDOT DWDOT
#define VCKHMS VCKHMS
#define VCKPY VCKPY
#define VCKWYR VCKWYR
#define VCKYTX VCKYTX
#define GET_T_GIVEN_EY GET_T_GIVEN_EY
#define GET_T_GIVEN_HY GET_T_GIVEN_HY
#define GET_REACTION_MAP GET_REACTION_MAP
#define GET_CRITPARAMS GET_CRITPARAMS
#elif defined(BL_FORT_USE_LOWERCASE)
#define CKINDX ckindx
#define CKINIT ckinit
#define CKFINALIZE ckfinalize
#define CKXNUM ckxnum
#define CKSYME cksyme
#define CKSYMS cksyms
#define CKRP ckrp
#define CKPX ckpx
#define CKPY ckpy
#define CKPC ckpc
#define CKRHOX ckrhox
#define CKRHOY ckrhoy
#define CKRHOC ckrhoc
#define CKWT ckwt
#define CKAWT ckawt
#define CKMMWY ckmmwy
#define CKMMWX ckmmwx
#define CKMMWC ckmmwc
#define CKYTX ckytx
#define CKYTCP ckytcp
#define CKYTCR ckytcr
#define CKXTY ckxty
#define CKXTCP ckxtcp
#define CKXTCR ckxtcr
#define CKCTX ckctx
#define CKCTY ckcty
#define CKCPOR ckcpor
#define CKHORT ckhort
#define CKSOR cksor
#define CKCVML ckcvml
#define CKCPML ckcpml
#define CKUML ckuml
#define CKHML ckhml
#define CKGML ckgml
#define CKAML ckaml
#define CKSML cksml
#define CKCVMS ckcvms
#define CKCPMS ckcpms
#define CKUMS ckums
#define CKHMS ckhms
#define CKGMS ckgms
#define CKAMS ckams
#define CKSMS cksms
#define CKCPBL ckcpbl
#define CKCPBS ckcpbs
#define CKCVBL ckcvbl
#define CKCVBS ckcvbs
#define CKHBML ckhbml
#define CKHBMS ckhbms
#define CKUBML ckubml
#define CKUBMS ckubms
#define CKSBML cksbml
#define CKSBMS cksbms
#define CKGBML ckgbml
#define CKGBMS ckgbms
#define CKABML ckabml
#define CKABMS ckabms
#define CKWC ckwc
#define CKWYP ckwyp
#define CKWXP ckwxp
#define CKWYR ckwyr
#define CKWXR ckwxr
#define CKQC ckqc
#define CKKFKR ckkfkr
#define CKQYP ckqyp
#define CKQXP ckqxp
#define CKQYR ckqyr
#define CKQXR ckqxr
#define CKNU cknu
#define CKNCF ckncf
#define CKABE ckabe
#define CKEQC ckeqc
#define CKEQYP ckeqyp
#define CKEQXP ckeqxp
#define CKEQYR ckeqyr
#define CKEQXR ckeqxr
#define DWDOT dwdot
#define VCKHMS vckhms
#define VCKPY vckpy
#define VCKWYR vckwyr
#define VCKYTX vckytx
#define GET_T_GIVEN_EY get_t_given_ey
#define GET_T_GIVEN_HY get_t_given_hy
#define GET_REACTION_MAP get_reaction_map
#define GET_CRITPARAMS get_critparams
#elif defined(BL_FORT_USE_UNDERSCORE)
#define CKINDX ckindx_
#define CKINIT ckinit_
#define CKFINALIZE ckfinalize_
#define CKXNUM ckxnum_
#define CKSYME cksyme_
#define CKSYMS cksyms_
#define CKRP ckrp_
#define CKPX ckpx_
#define CKPY ckpy_
#define CKPC ckpc_
#define CKRHOX ckrhox_
#define CKRHOY ckrhoy_
#define CKRHOC ckrhoc_
#define CKWT ckwt_
#define CKAWT ckawt_
#define CKMMWY ckmmwy_
#define CKMMWX ckmmwx_
#define CKMMWC ckmmwc_
#define CKYTX ckytx_
#define CKYTCP ckytcp_
#define CKYTCR ckytcr_
#define CKXTY ckxty_
#define CKXTCP ckxtcp_
#define CKXTCR ckxtcr_
#define CKCTX ckctx_
#define CKCTY ckcty_
#define CKCPOR ckcpor_
#define CKHORT ckhort_
#define CKSOR cksor_
#define CKCVML ckcvml_
#define CKCPML ckcpml_
#define CKUML ckuml_
#define CKHML ckhml_
#define CKGML ckgml_
#define CKAML ckaml_
#define CKSML cksml_
#define CKCVMS ckcvms_
#define CKCPMS ckcpms_
#define CKUMS ckums_
#define CKHMS ckhms_
#define CKGMS ckgms_
#define CKAMS ckams_
#define CKSMS cksms_
#define CKCPBL ckcpbl_
#define CKCPBS ckcpbs_
#define CKCVBL ckcvbl_
#define CKCVBS ckcvbs_
#define CKHBML ckhbml_
#define CKHBMS ckhbms_
#define CKUBML ckubml_
#define CKUBMS ckubms_
#define CKSBML cksbml_
#define CKSBMS cksbms_
#define CKGBML ckgbml_
#define CKGBMS ckgbms_
#define CKABML ckabml_
#define CKABMS ckabms_
#define CKWC ckwc_
#define CKWYP ckwyp_
#define CKWXP ckwxp_
#define CKWYR ckwyr_
#define CKWXR ckwxr_
#define CKQC ckqc_
#define CKKFKR ckkfkr_
#define CKQYP ckqyp_
#define CKQXP ckqxp_
#define CKQYR ckqyr_
#define CKQXR ckqxr_
#define CKNU cknu_
#define CKNCF ckncf_
#define CKABE ckabe_
#define CKEQC ckeqc_
#define CKEQYP ckeqyp_
#define CKEQXP ckeqxp_
#define CKEQYR ckeqyr_
#define CKEQXR ckeqxr_
#define DWDOT dwdot_
#define VCKHMS vckhms_
#define VCKPY vckpy_
#define VCKWYR vckwyr_
#define VCKYTX vckytx_
#define GET_T_GIVEN_EY get_t_given_ey_
#define GET_T_GIVEN_HY get_t_given_hy_
#define GET_REACTION_MAP get_reaction_map_
#define GET_CRITPARAMS get_critparams_
#endif
/*function declarations */
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetEPS EGTRANSETEPS
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetEPS egtranseteps
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetEPS egtranseteps_
#endif
void egtransetEPS(double * EPS);
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetSIG EGTRANSETSIG
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetSIG egtransetsig
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetSIG egtransetsig_
#endif
void egtransetSIG(double* SIG);
void atomicWeight(double * restrict awt);
void molecularWeight(double * restrict wt);
void gibbs(double * restrict species, double * restrict tc);
void helmholtz(double * restrict species, double * restrict tc);
void speciesInternalEnergy(double * restrict species, double * restrict tc);
void speciesEnthalpy(double * restrict species, double * restrict tc);
void speciesEntropy(double * restrict species, double * restrict tc);
void cp_R(double * restrict species, double * restrict tc);
void cv_R(double * restrict species, double * restrict tc);
void equilibriumConstants(double * restrict kc, double * restrict g_RT, double T);
void productionRate(double * restrict wdot, double * restrict sc, double T);
void comp_k_f(double * restrict tc, double invT, double * restrict k_f);
void comp_Kc(double * restrict tc, double invT, double * restrict Kc);
void comp_qfqr(double * restrict q_f, double * restrict q_r, double * restrict sc, double * restrict tc, double invT);
void progressRate(double * restrict qdot, double * restrict speciesConc, double T);
void progressRateFR(double * restrict q_f, double * restrict q_r, double * restrict speciesConc, double T);
void CKINIT();
void CKFINALIZE();
void CKINDX(int * iwrk, double * restrict rwrk, int * mm, int * kk, int * ii, int * nfit );
void CKXNUM(char * line, int * nexp, int * lout, int * nval, double * restrict rval, int * kerr, int lenline);
void CKSNUM(char * line, int * nexp, int * lout, char * kray, int * nn, int * knum, int * nval, double * restrict rval, int * kerr, int lenline, int lenkray);
void CKSYME(int * kname, int * lenkname);
void CKSYMS(int * kname, int * lenkname);
void CKRP(int * ickwrk, double * restrict rckwrk, double * restrict ru, double * restrict ruc, double * restrict pa);
void CKPX(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict P);
void CKPY(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P);
void CKPC(double * restrict rho, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict P);
void CKRHOX(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict rho);
void CKRHOY(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict rho);
void CKRHOC(double * restrict P, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict rho);
void CKWT(int * iwrk, double * restrict rwrk, double * restrict wt);
void CKAWT(int * iwrk, double * restrict rwrk, double * restrict awt);
void CKMMWY(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wtm);
void CKMMWX(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wtm);
void CKMMWC(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict wtm);
void CKYTX(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x);
void CKYTCP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c);
void CKYTCR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c);
void CKXTY(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict y);
void CKXTCP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c);
void CKXTCR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c);
void CKCTX(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict x);
void CKCTY(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict y);
void CKCPOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpor);
void CKHORT(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hort);
void CKSOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sor);
void CKCVML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml);
void CKCPML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml);
void CKUML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml);
void CKHML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml);
void CKGML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gml);
void CKAML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict aml);
void CKSML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sml);
void CKCVMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms);
void CKCPMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms);
void CKUMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);
void CKHMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);
void CKGMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gms);
void CKAMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ams);
void CKSMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sms);
void CKCPBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl);
void CKCPBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs);
void CKCVBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl);
void CKCVBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs);
void CKHBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict hbml);
void CKHBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict hbms);
void CKUBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict ubml);
void CKUBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict ubms);
void CKSBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict sbml);
void CKSBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict sbms);
void CKGBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict gbml);
void CKGBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict gbms);
void CKABML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict abml);
void CKABMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict abms);
void CKWC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKQC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKKFKR(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict q_f, double * restrict q_r);
void CKQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKNU(int * kdim, int * iwrk, double * restrict rwrk, int * nuki);
void CKNCF(int * mdim, int * iwrk, double * restrict rwrk, int * ncf);
void CKABE(int * iwrk, double * restrict rwrk, double * restrict a, double * restrict b, double * restrict e );
void CKEQC(double * restrict T, double * restrict C , int * iwrk, double * restrict rwrk, double * restrict eqcon );
void CKEQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void CKEQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void CKEQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void CKEQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void DWDOT(double * restrict J, double * restrict sc, double * restrict T, int * consP);
void aJacobian(double * restrict J, double * restrict sc, double T, int consP);
void dcvpRdT(double * restrict species, double * restrict tc);
void GET_T_GIVEN_EY(double * restrict e, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int *ierr);
void GET_T_GIVEN_HY(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int *ierr);
void GET_REACTION_MAP(int * restrict rmap);
/*vector version */
void vproductionRate(int npt, double * restrict wdot, double * restrict c, double * restrict T);
void VCKHMS(int * restrict np, double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);
void VCKPY(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P);
void VCKWYR(int * restrict np, double * restrict rho, double * restrict T,
double * restrict y, int * restrict iwrk, double * restrict rwrk,
double * restrict wdot);
void VCKYTX(int * restrict np, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x);
void vcomp_k_f(int npt, double * restrict k_f_s, double * restrict tc, double * restrict invT);
void vcomp_gibbs(int npt, double * restrict g_RT, double * restrict tc);
void vcomp_Kc(int npt, double * restrict Kc_s, double * restrict g_RT, double * restrict invT);
void GET_CRITPARAMS(double * restrict Tci, double * restrict ai, double * restrict bi, double * restrict acentric_i);
void vcomp_wdot(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc,
double * restrict k_f_s, double * restrict Kc_s,
double * restrict tc, double * restrict invT, double * restrict T);
/* Inverse molecular weights */
static const double imw[3] = {
    1.0 / 100.205570, /*NC7H16 */
    1.0 / 31.998800, /*O2 */
    1.0 / 28.013400}; /*N2 */
/* Per-reaction rate-parameter working tables. This generated mechanism has
   0 reactions, so every table below is declared with length 0 and every loop
   over them is a no-op.
   NOTE(review): zero-length arrays and the empty initializer `{}` used for
   rxn_map are GNU extensions, not strict ISO C11 — confirm the build always
   uses a compiler that accepts them. */
static double fwd_A[0], fwd_beta[0], fwd_Ea[0];
static double low_A[0], low_beta[0], low_Ea[0];
static double rev_A[0], rev_beta[0], rev_Ea[0];
static double troe_a[0],troe_Ts[0], troe_Tss[0], troe_Tsss[0];
static double sri_a[0], sri_b[0], sri_c[0], sri_d[0], sri_e[0];
static double activation_units[0], prefactor_units[0], phase_units[0];
static int is_PD[0], troe_len[0], sri_len[0], nTB[0], *TBid[0];
static double *TB[0];
/* Saved-default copies of the tables above (see SetAllDefaults /
   ResetAllParametersToDefault). */
static double fwd_A_DEF[0], fwd_beta_DEF[0], fwd_Ea_DEF[0];
static double low_A_DEF[0], low_beta_DEF[0], low_Ea_DEF[0];
static double rev_A_DEF[0], rev_beta_DEF[0], rev_Ea_DEF[0];
static double troe_a_DEF[0],troe_Ts_DEF[0], troe_Tss_DEF[0], troe_Tsss_DEF[0];
static double sri_a_DEF[0], sri_b_DEF[0], sri_c_DEF[0], sri_d_DEF[0], sri_e_DEF[0];
static double activation_units_DEF[0], prefactor_units_DEF[0], phase_units_DEF[0];
static int is_PD_DEF[0], troe_len_DEF[0], sri_len_DEF[0], nTB_DEF[0], *TBid_DEF[0];
static double *TB_DEF[0];
/* Mapping from external reaction ids to internal ordering (empty here). */
static int rxn_map[0] = {};
/* Copies the reaction reordering map into rmap. This mechanism declares
   0 reactions, so the copy is a no-op. */
void GET_REACTION_MAP(int *rmap)
{
    const int nreact = 0; /* reaction count for this mechanism */
    for (int j = 0; j < nreact; ++j) {
        rmap[j] = rxn_map[j];
    }
}
#include <ReactionData.H>
/* Looks up a writable pointer to a single rate parameter.
 *   reaction_id : index into this mechanism's reaction list
 *   param_id    : which parameter (FWD_A, TROE_A, THIRD_BODY, ...) — enum
 *                 declared in ReactionData.H
 *   species_id  : third-body species slot, only consulted for THIRD_BODY
 *   get_default : nonzero -> read the saved-default tables instead of the
 *                 working tables
 * NOTE(review): this mechanism has 0 reactions, so the range guard below
 * (reaction_id<0 || reaction_id>=0) is always true and every call aborts;
 * the rest of the body is generator boilerplate kept for interface parity. */
double* GetParamPtr(int reaction_id,
                    REACTION_PARAMETER param_id,
                    int species_id,
                    int get_default)
{
  double* ret = 0;
  /* validate reaction id against the reaction count (0 for this mechanism) */
  if (reaction_id<0 || reaction_id>=0) {
    printf("Bad reaction id = %d",reaction_id);
    abort();
  };
  int mrid = rxn_map[reaction_id]; /* external id -> internal ordering */
  if (param_id == THIRD_BODY) {
    /* third-body efficiencies are stored per (reaction, species) pair */
    if (species_id<0 || species_id>=3) {
      printf("GetParamPtr: Bad species id = %d",species_id);
      abort();
    }
    if (get_default) {
      for (int i=0; i<nTB_DEF[mrid]; ++i) {
        if (species_id == TBid_DEF[mrid][i]) {
          ret = &(TB_DEF[mrid][i]);
        }
      }
    }
    else {
      for (int i=0; i<nTB[mrid]; ++i) {
        if (species_id == TBid[mrid][i]) {
          ret = &(TB[mrid][i]);
        }
      }
    }
    /* species has no third-body entry for this reaction */
    if (ret == 0) {
      printf("GetParamPtr: No TB for reaction id = %d",reaction_id);
      abort();
    }
  }
  else {
    /* scalar parameters: pick the working or default table */
    if ( param_id == FWD_A) {ret = (get_default ? &(fwd_A_DEF[mrid]) : &(fwd_A[mrid]));}
    else if (param_id == FWD_BETA) {ret = (get_default ? &(fwd_beta_DEF[mrid]) : &(fwd_beta[mrid]));}
    else if (param_id == FWD_EA) {ret = (get_default ? &(fwd_Ea_DEF[mrid]) : &(fwd_Ea[mrid]));}
    else if (param_id == LOW_A) {ret = (get_default ? &(low_A_DEF[mrid]) : &(low_A[mrid]));}
    else if (param_id == LOW_BETA) {ret = (get_default ? &(low_beta_DEF[mrid]) : &(low_beta[mrid]));}
    else if (param_id == LOW_EA) {ret = (get_default ? &(low_Ea_DEF[mrid]) : &(low_Ea[mrid]));}
    else if (param_id == REV_A) {ret = (get_default ? &(rev_A_DEF[mrid]) : &(rev_A[mrid]));}
    else if (param_id == REV_BETA) {ret = (get_default ? &(rev_beta_DEF[mrid]) : &(rev_beta[mrid]));}
    else if (param_id == REV_EA) {ret = (get_default ? &(rev_Ea_DEF[mrid]) : &(rev_Ea[mrid]));}
    else if (param_id == TROE_A) {ret = (get_default ? &(troe_a_DEF[mrid]) : &(troe_a[mrid]));}
    else if (param_id == TROE_TS) {ret = (get_default ? &(troe_Ts_DEF[mrid]) : &(troe_Ts[mrid]));}
    else if (param_id == TROE_TSS) {ret = (get_default ? &(troe_Tss_DEF[mrid]) : &(troe_Tss[mrid]));}
    else if (param_id == TROE_TSSS) {ret = (get_default ? &(troe_Tsss_DEF[mrid]) : &(troe_Tsss[mrid]));}
    else if (param_id == SRI_A) {ret = (get_default ? &(sri_a_DEF[mrid]) : &(sri_a[mrid]));}
    else if (param_id == SRI_B) {ret = (get_default ? &(sri_b_DEF[mrid]) : &(sri_b[mrid]));}
    else if (param_id == SRI_C) {ret = (get_default ? &(sri_c_DEF[mrid]) : &(sri_c[mrid]));}
    else if (param_id == SRI_D) {ret = (get_default ? &(sri_d_DEF[mrid]) : &(sri_d[mrid]));}
    else if (param_id == SRI_E) {ret = (get_default ? &(sri_e_DEF[mrid]) : &(sri_e[mrid]));}
    else {
      printf("GetParamPtr: Unknown parameter id");
      abort();
    }
  }
  return ret;
}
/* Restores every reaction's working rate parameters from the saved-default
   tables, rebuilding third-body storage from the defaults. The loop bound is
   the reaction count, which is 0 for this mechanism, so the body never runs;
   it is kept for interface parity with nonzero mechanisms. */
void ResetAllParametersToDefault()
{
  for (int i=0; i<0; i++) {
    /* release any existing third-body override storage first */
    if (nTB[i] != 0) {
      nTB[i] = 0;
      free(TB[i]);
      free(TBid[i]);
    }
    fwd_A[i] = fwd_A_DEF[i];
    fwd_beta[i] = fwd_beta_DEF[i];
    fwd_Ea[i] = fwd_Ea_DEF[i];
    low_A[i] = low_A_DEF[i];
    low_beta[i] = low_beta_DEF[i];
    low_Ea[i] = low_Ea_DEF[i];
    rev_A[i] = rev_A_DEF[i];
    rev_beta[i] = rev_beta_DEF[i];
    rev_Ea[i] = rev_Ea_DEF[i];
    troe_a[i] = troe_a_DEF[i];
    troe_Ts[i] = troe_Ts_DEF[i];
    troe_Tss[i] = troe_Tss_DEF[i];
    troe_Tsss[i] = troe_Tsss_DEF[i];
    sri_a[i] = sri_a_DEF[i];
    sri_b[i] = sri_b_DEF[i];
    sri_c[i] = sri_c_DEF[i];
    sri_d[i] = sri_d_DEF[i];
    sri_e[i] = sri_e_DEF[i];
    is_PD[i] = is_PD_DEF[i];
    troe_len[i] = troe_len_DEF[i];
    sri_len[i] = sri_len_DEF[i];
    activation_units[i] = activation_units_DEF[i];
    prefactor_units[i] = prefactor_units_DEF[i];
    phase_units[i] = phase_units_DEF[i];
    nTB[i] = nTB_DEF[i];
    /* deep-copy the default third-body tables into fresh working storage */
    if (nTB[i] != 0) {
       TB[i] = (double *) malloc(sizeof(double) * nTB[i]);
       TBid[i] = (int *) malloc(sizeof(int) * nTB[i]);
       for (int j=0; j<nTB[i]; j++) {
         TB[i][j] = TB_DEF[i][j];
         TBid[i][j] = TBid_DEF[i][j];
       }
    }
  }
}
/* Snapshots the current working rate parameters into the saved-default
   tables (the inverse of ResetAllParametersToDefault). The loop bound is
   the reaction count, 0 for this mechanism, so the body never runs. */
void SetAllDefaults()
{
  for (int i=0; i<0; i++) {
    /* release any previously saved third-body storage first */
    if (nTB_DEF[i] != 0) {
      nTB_DEF[i] = 0;
      free(TB_DEF[i]);
      free(TBid_DEF[i]);
    }
    fwd_A_DEF[i] = fwd_A[i];
    fwd_beta_DEF[i] = fwd_beta[i];
    fwd_Ea_DEF[i] = fwd_Ea[i];
    low_A_DEF[i] = low_A[i];
    low_beta_DEF[i] = low_beta[i];
    low_Ea_DEF[i] = low_Ea[i];
    rev_A_DEF[i] = rev_A[i];
    rev_beta_DEF[i] = rev_beta[i];
    rev_Ea_DEF[i] = rev_Ea[i];
    troe_a_DEF[i] = troe_a[i];
    troe_Ts_DEF[i] = troe_Ts[i];
    troe_Tss_DEF[i] = troe_Tss[i];
    troe_Tsss_DEF[i] = troe_Tsss[i];
    sri_a_DEF[i] = sri_a[i];
    sri_b_DEF[i] = sri_b[i];
    sri_c_DEF[i] = sri_c[i];
    sri_d_DEF[i] = sri_d[i];
    sri_e_DEF[i] = sri_e[i];
    is_PD_DEF[i] = is_PD[i];
    troe_len_DEF[i] = troe_len[i];
    sri_len_DEF[i] = sri_len[i];
    activation_units_DEF[i] = activation_units[i];
    prefactor_units_DEF[i] = prefactor_units[i];
    phase_units_DEF[i] = phase_units[i];
    nTB_DEF[i] = nTB[i];
    /* deep-copy the working third-body tables into fresh default storage */
    if (nTB_DEF[i] != 0) {
       TB_DEF[i] = (double *) malloc(sizeof(double) * nTB_DEF[i]);
       TBid_DEF[i] = (int *) malloc(sizeof(int) * nTB_DEF[i]);
       for (int j=0; j<nTB_DEF[i]; j++) {
         TB_DEF[i][j] = TB[i][j];
         TBid_DEF[i][j] = TBid[i][j];
       }
    }
  }
}
/* Finalizes the parameter database: frees per-reaction third-body storage
   in both the working and default tables. With 0 reactions the loop body
   never executes. */
void CKFINALIZE()
{
    for (int j = 0; j < 0; ++j) {
        free(TB[j]);         TB[j] = 0;
        free(TBid[j]);       TBid[j] = 0;
        nTB[j] = 0;
        free(TB_DEF[j]);     TB_DEF[j] = 0;
        free(TBid_DEF[j]);   TBid_DEF[j] = 0;
        nTB_DEF[j] = 0;
    }
}
/* Initializes parameter database */
void CKINIT()
{
    /* Snapshot the current (generated) parameter values as the defaults. */
    SetAllDefaults();
}
/* Reports the mechanism dimensions through the CK interface. */
void CKINDX(int * iwrk, double * restrict rwrk, int * mm, int * kk, int * ii, int * nfit)
{
    *nfit = -1; /* fit-coefficient count: unused placeholder in this interface */
    *ii = 0;    /* number of reactions */
    *kk = 3;    /* number of species: NC7H16, O2, N2 */
    *mm = 4;    /* number of elements: C, H, O, N */
}
/* ckxnum... for parsing strings.
 * Parses up to *nexp blank-separated numbers from `line` (a '!' starts a
 * comment), storing them in rval. On return *nval is the number of values
 * actually stored and *kerr is set to 1 on error (no tokens, or fewer than
 * *nexp found); *kerr is left untouched on success, matching CK convention.
 * `lout` (logging unit) is unused. Fixes vs. the original: the copy into
 * cstr is bounded (the original overflowed for lenline >= 1000), *nval can
 * no longer exceed *nexp (original reported *nexp+1 when extra tokens were
 * present), and parsing uses standard C strtod/strspn/strcspn instead of
 * atof/POSIX strtok_r. */
void CKXNUM(char * line, int * nexp, int * lout, int * nval, double * restrict rval, int * kerr, int lenline )
{
    int n;            /* count of values parsed */
    int i;            /* copy index */
    char cstr[1000];  /* comment-stripped working copy */
    /* Strip trailing '!' comment; bound the copy so a long input line
       cannot overflow cstr. */
    for (i = 0; i < lenline && i < (int)sizeof(cstr) - 1; ++i) {
        if (line[i] == '!') {
            break;
        }
        cstr[i] = line[i];
    }
    cstr[i] = '\0';
    /* Position p at the first token (skip leading blanks). */
    char *p = cstr + strspn(cstr, " ");
    if (*p == '\0') {
        *nval = 0;
        *kerr = 1;
        return;
    }
    for (n = 0; n < *nexp; ++n) {
        rval[n] = strtod(p, NULL); /* strtod, not atof: defined on overflow */
        p += strcspn(p, " ");      /* skip the token just parsed */
        p += strspn(p, " ");       /* and the blanks after it */
        if (*p == '\0') {
            break;
        }
    }
    /* If the loop broke after storing rval[n], n+1 values were stored;
       if it ran to completion, exactly *nexp were (extra tokens ignored). */
    *nval = (n == *nexp) ? *nexp : n + 1;
    if (*nval < *nexp) *kerr = 1;
    return;
}
/* cksnum... for parsing strings */
void CKSNUM(char * line, int * nexp, int * lout, char * kray, int * nn, int * knum, int * nval, double * restrict rval, int * kerr, int lenline, int lenkray)
{
    /* Stub: species-keyword parsing was never generated for this mechanism.
       NOTE(review): callers get no results and *kerr is never set — confirm
       nothing in the build actually calls CKSNUM. */
    /*Not done yet ... */
}
/* Writes the element-name table: 4 names, each occupying *plenkname ints
   (characters stored one per int), blank-padded. */
void CKSYME(int * kname, int * plenkname )
{
    static const char elem[4] = { 'C', 'H', 'O', 'N' };
    int width = *plenkname;
    /* blank-fill the whole table first */
    for (int j = 0; j < width*4; j++) {
        kname[j] = ' ';
    }
    /* one-character element symbols, blank-terminated */
    for (int e = 0; e < 4; e++) {
        kname[e*width + 0] = elem[e];
        kname[e*width + 1] = ' ';
    }
}
/* Writes the species-name table: 3 names, each occupying *plenkname ints
   (characters stored one per int), blank-padded. */
void CKSYMS(int * kname, int * plenkname )
{
    static const char *species[3] = { "NC7H16", "O2", "N2" };
    int width = *plenkname;
    /* blank-fill the whole table first */
    for (int j = 0; j < width*3; j++) {
        kname[j] = ' ';
    }
    /* copy each name character by character, blank-terminated */
    for (int k = 0; k < 3; k++) {
        int j = 0;
        for (; species[k][j] != '\0'; j++) {
            kname[k*width + j] = species[k][j];
        }
        kname[k*width + j] = ' ';
    }
}
/* Returns the universal gas constants and standard pressure in CGS units. */
void CKRP(int * ickwrk, double * restrict rckwrk, double * restrict ru, double * restrict ruc, double * restrict pa)
{
    *pa  = 1.01325e+06;            /* 1 atm in dyne/cm^2 */
    *ru  = 8.31451e+07;            /* R in erg/(mol K) */
    *ruc = 1.98721558317399615845; /* R in cal/(mol K) */
}
/* Pressure from density, temperature and mole fractions: P = rho*R*T/W(x). */
void CKPX(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict P)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double meanW = 0.0; /* mole-fraction-averaged molecular weight */
    for (int k = 0; k < 3; ++k) {
        meanW += x[k]*wt[k];
    }
    *P = *rho * 8.31451e+07 * (*T) / meanW; /* ideal gas law */
    return;
}
/* Pressure from density, temperature and mass fractions:
   P = rho*R*T*sum_k(y_k/W_k). */
void CKPY(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P)
{
    double invW = 0.0; /* reciprocal mean molecular weight, mol/g */
    for (int k = 0; k < 3; ++k) {
        invW += y[k]*imw[k];
    }
    *P = *rho * 8.31451e+07 * (*T) * invW; /* ideal gas law */
    return;
}
/* Vector form of CKPY: pressures for (*np) points at once.
   y is stored species-major: y[n*np + i] is species n at point i. */
void VCKPY(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P)
{
    const int npt = *np;
    for (int i = 0; i < npt; i++) {
        double invW = 0.0; /* sum_k y_k/W_k at point i */
        for (int n = 0; n < 3; n++) {
            invW += y[n*npt+i] * imw[n];
        }
        P[i] = rho[i] * 8.31451e+07 * T[i] * invW; /* ideal gas law */
    }
    return;
}
/* Pressure from density, temperature and molar concentrations (Eq 5 in the
   CK manual): P = rho*R*T*sumC/W, with W the concentration-weighted MW. */
void CKPC(double * restrict rho, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict P)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double W = 0.0;    /* concentration-weighted molecular weight */
    double sumC = 0.0; /* total molar concentration */
    for (int k = 0; k < 3; ++k) {
        W += c[k]*wt[k];
    }
    for (int k = 0; k < 3; ++k) {
        sumC += c[k];
    }
    *P = *rho * 8.31451e+07 * (*T) * sumC / W;
    return;
}
/* Density from pressure, temperature and mole fractions: rho = P*W(x)/(R*T). */
void CKRHOX(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict rho)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double meanW = 0.0; /* mole-fraction-averaged molecular weight */
    for (int k = 0; k < 3; ++k) {
        meanW += x[k]*wt[k];
    }
    *rho = *P * meanW / (8.31451e+07 * (*T));
    return;
}
/* Density from pressure, temperature and mass fractions:
   rho = P / (R*T*sum_k y_k/W_k). */
void CKRHOY(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict rho)
{
    double invW = 0.0; /* reciprocal mean molecular weight */
    for (int k = 0; k < 3; ++k) {
        invW += y[k]*imw[k];
    }
    *rho = *P / (8.31451e+07 * (*T) * invW);
    return;
}
/* Density from pressure, temperature and molar concentrations (Eq 5):
   rho = P*W/(sumC*R*T). */
void CKRHOC(double * restrict P, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict rho)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double W = 0.0;    /* concentration-weighted molecular weight */
    double sumC = 0.0; /* total molar concentration */
    for (int k = 0; k < 3; ++k) {
        W += c[k]*wt[k];
    }
    for (int k = 0; k < 3; ++k) {
        sumC += c[k];
    }
    *rho = *P * W / (sumC * (*T) * 8.31451e+07);
    return;
}
/*get molecular weight for all species */
void CKWT(int * iwrk, double * restrict rwrk, double * restrict wt)
{
    /* Delegates to the generated molecularWeight() table fill; wt must hold
       one double per species (3 in this mechanism). */
    molecularWeight(wt);
}
/*get atomic weight for all elements */
void CKAWT(int * iwrk, double * restrict rwrk, double * restrict awt)
{
    /* Delegates to the generated atomicWeight() table fill; awt presumably
       holds one double per element (4 here, matching CKSYME) — confirm in
       the generated helper. */
    atomicWeight(awt);
}
/* Mean molecular weight (g/mol) from mass fractions:
   W = 1 / sum_k(y_k/W_k). */
void CKMMWY(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wtm)
{
    double invW = 0.0; /* sum_k y_k/W_k */
    for (int k = 0; k < 3; ++k) {
        invW += y[k]*imw[k];
    }
    *wtm = 1.0 / invW;
    return;
}
/* Mean molecular weight (g/mol) from mole fractions (Eq 4):
   W = sum_k x_k*W_k. */
void CKMMWX(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wtm)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double meanW = 0.0;
    for (int k = 0; k < 3; ++k) {
        meanW += x[k]*wt[k];
    }
    *wtm = meanW;
    return;
}
/* Mean molecular weight (g/mol) from molar concentrations (Eq 5):
   W = sum_k(c_k*W_k) / sum_k c_k. No guard against sumC == 0, matching
   the CK interface. */
void CKMMWC(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict wtm)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double W = 0.0;
    double sumC = 0.0;
    for (int k = 0; k < 3; ++k) {
        W += c[k]*wt[k];
    }
    for (int k = 0; k < 3; ++k) {
        sumC += c[k];
    }
    *wtm = W/sumC;
    return;
}
/* Mass fractions -> mole fractions: x_k = (y_k/W_k) / sum_j(y_j/W_j). */
void CKYTX(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x)
{
    double invW = 0.0; /* sum_k y_k/W_k */
    for (int k = 0; k < 3; ++k) {
        invW += y[k]*imw[k];
    }
    double norm = 1.0/invW;
    for (int k = 0; k < 3; ++k) {
        x[k] = y[k]*imw[k]*norm;
    }
    return;
}
/* Vector CKYTX for (*np) points; y and x are species-major
   (element [n*np + i] is species n at point i). */
void VCKYTX(int * restrict np, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x)
{
    const int npt = *np;
    for (int i = 0; i < npt; i++) {
        double invW = 0.0; /* sum_k y_k/W_k at point i */
        for (int n = 0; n < 3; n++) {
            x[n*npt+i] = y[n*npt+i] * imw[n];
            invW += x[n*npt+i];
        }
        double norm = 1.0/invW;
        for (int n = 0; n < 3; n++) {
            x[n*npt+i] *= norm;
        }
    }
}
/* Mass fractions -> molar concentrations at pressure *P (Eq 7):
   c_k = (P*W/(R*T)) * y_k/W_k. */
void CKYTCP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c)
{
    double invW = 0.0; /* sum_k y_k/W_k = 1/mean MW */
    for (int k = 0; k < 3; k++) {
        c[k] = y[k]*imw[k]; /* scratch: y_k/W_k */
        invW += c[k];
    }
    double PWORT = (*P)/(invW * 8.31451e+07 * (*T)); /* P*W/(R*T), Eq 7 */
    for (int k = 0; k < 3; k++) {
        c[k] = PWORT * y[k] * imw[k];
    }
    return;
}
/* Mass fractions -> molar concentrations at density *rho:
   c_k = rho*y_k/W_k. */
void CKYTCR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c)
{
    for (int k = 0; k < 3; ++k) {
        c[k] = (*rho) * y[k] * imw[k];
    }
}
/* Mole fractions -> mass fractions (Eq 9): y_k = x_k*W_k / W(x). */
void CKXTY(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict y)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double meanW = 0.0; /* mean molecular weight, Eq 4 */
    for (int k = 0; k < 3; ++k) {
        meanW += x[k]*wt[k];
    }
    double inv = 1.0/meanW;
    for (int k = 0; k < 3; ++k) {
        y[k] = x[k]*wt[k]*inv;
    }
    return;
}
/* Mole fractions -> molar concentrations at pressure *P (Eq 10):
   c_k = x_k * P/(R*T). */
void CKXTCP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c)
{
    const double PORT = (*P)/(8.31451e+07 * (*T)); /* P/(R*T) */
    for (int k = 0; k < 3; ++k) {
        c[k] = x[k]*PORT;
    }
    return;
}
/* Mole fractions -> molar concentrations at density *rho (Eq 11):
   c_k = x_k * rho/W(x). */
void CKXTCR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double meanW = 0.0; /* mean molecular weight, Eq 4 */
    for (int k = 0; k < 3; ++k) {
        meanW += x[k]*wt[k];
    }
    const double ROW = (*rho) / meanW; /* molar density of the mixture */
    for (int k = 0; k < 3; ++k) {
        c[k] = x[k]*ROW;
    }
    return;
}
/* Molar concentrations -> mole fractions (Eq 13): x_k = c_k / sum_j c_j. */
void CKCTX(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict x)
{
    double total = 0.0; /* total molar concentration */
    for (int k = 0; k < 3; ++k) {
        total += c[k];
    }
    double inv = 1.0/total;
    for (int k = 0; k < 3; ++k) {
        x[k] = c[k]*inv;
    }
    return;
}
/* Molar concentrations -> mass fractions (Eq 12):
   y_k = c_k*W_k / sum_j(c_j*W_j). */
void CKCTY(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict y)
{
    static const double wt[3] = {100.205570, 31.998800, 28.013400}; /* NC7H16, O2, N2 */
    double CW = 0.0; /* denominator of Eq 12 */
    for (int k = 0; k < 3; ++k) {
        CW += c[k]*wt[k];
    }
    double inv = 1.0/CW;
    for (int k = 0; k < 3; ++k) {
        y[k] = c[k]*wt[k]*inv;
    }
    return;
}
/* Nondimensional specific heats Cp/R for all species at *T (Eq 19). */
void CKCPOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpor)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    cp_R(cpor, tc);
}
/* Nondimensional enthalpies H/RT for all species at *T (Eq 20). */
void CKHORT(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hort)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    speciesEnthalpy(hort, tc);
}
/* Nondimensional entropies S/R for all species at *T (Eq 21). */
void CKSOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sor)
{
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    speciesEntropy(sor, tc);
}
/* Molar constant-volume specific heats, CGS units (erg/(mol K)). */
void CKCVML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    cv_R(cvml, tc);
    for (int k = 0; k < 3; ++k) {
        cvml[k] *= 8.31451e+07; /* Cv/R -> erg/(mol K) */
    }
}
/* Molar constant-pressure specific heats, CGS units (erg/(mol K)). */
void CKCPML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpml)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    cp_R(cpml, tc);
    for (int k = 0; k < 3; ++k) {
        cpml[k] *= 8.31451e+07; /* Cp/R -> erg/(mol K) */
    }
}
/* Molar internal energies, erg/mol: u/RT from the fits scaled by R*T. */
void CKUML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    speciesInternalEnergy(uml, tc);
    for (int k = 0; k < 3; ++k) {
        uml[k] *= RT;
    }
}
/* Molar enthalpies, erg/mol: h/RT from the fits scaled by R*T. */
void CKHML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hml)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    speciesEnthalpy(hml, tc);
    for (int k = 0; k < 3; ++k) {
        hml[k] *= RT;
    }
}
/* Standard-state molar Gibbs energies, erg/mol. */
void CKGML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gml)
{
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    gibbs(gml, tc);
    for (int k = 0; k < 3; ++k) {
        gml[k] *= RT;
    }
}
/* Standard-state molar Helmholtz free energies, erg/mol. */
void CKAML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict aml)
{
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    helmholtz(aml, tc);
    for (int k = 0; k < 3; ++k) {
        aml[k] *= RT;
    }
}
/* Standard-state molar entropies, erg/(mol K). */
void CKSML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sml)
{
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    speciesEntropy(sml, tc);
    for (int k = 0; k < 3; ++k) {
        sml[k] *= 8.31451e+07; /* S/R -> erg/(mol K) */
    }
}
/* Mass-based constant-volume specific heats, erg/(g K) (Eq 29). */
void CKCVMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms)
{
    static const double RbyW[3] = {
        8.297452926019980e+05, /* R/W for NC7H16 */
        2.598381814318037e+06, /* R/W for O2 */
        2.968047434442088e+06  /* R/W for N2 */
    };
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    cv_R(cvms, tc);
    for (int k = 0; k < 3; ++k) {
        cvms[k] *= RbyW[k];
    }
}
/* Mass-based constant-pressure specific heats, erg/(g K) (Eq 26). */
void CKCPMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpms)
{
    static const double RbyW[3] = {
        8.297452926019980e+05, /* R/W for NC7H16 */
        2.598381814318037e+06, /* R/W for O2 */
        2.968047434442088e+06  /* R/W for N2 */
    };
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    cp_R(cpms, tc);
    for (int k = 0; k < 3; ++k) {
        cpms[k] *= RbyW[k];
    }
}
/* Mass-based internal energies, erg/g (Eq 30): u/RT scaled by R*T/W_k. */
void CKUMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    speciesInternalEnergy(ums, tc);
    for (int k = 0; k < 3; ++k) {
        ums[k] *= RT*imw[k];
    }
}
/* Mass-based enthalpies, erg/g (Eq 27): h/RT scaled by R*T/W_k. */
void CKHMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hms)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    speciesEnthalpy(hms, tc);
    for (int k = 0; k < 3; ++k) {
        hms[k] *= RT*imw[k];
    }
}
/* Vector CKHMS: mass-based enthalpies (Eq 27) for (*np) points.
   hms is species-major: hms[n*np + i] is species n at point i. */
void VCKHMS(int * restrict np, double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hms)
{
    const int npt = *np;
    double tc[5], h[3];
    for (int i = 0; i < npt; i++) {
        double t = T[i];
        tc[0] = 0.0;
        tc[1] = t;
        tc[2] = t*t;
        tc[3] = t*t*t;
        tc[4] = t*t*t*t;
        speciesEnthalpy(h, tc); /* nondimensional h/RT per species */
        for (int n = 0; n < 3; n++) {
            hms[n*npt+i] = h[n] * (8.31451e+07 * t * imw[n]); /* -> erg/g */
        }
    }
}
/* Mass-based Gibbs energies, erg/g (Eq 31). */
void CKGMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gms)
{
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    gibbs(gms, tc);
    for (int k = 0; k < 3; ++k) {
        gms[k] *= RT*imw[k];
    }
}
/* Mass-based Helmholtz free energies, erg/g (Eq 32). */
void CKAMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ams)
{
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    double RT = 8.31451e+07*t; /* R*T in CGS */
    helmholtz(ams, tc);
    for (int k = 0; k < 3; ++k) {
        ams[k] *= RT*imw[k];
    }
}
/* Mass-based entropies, erg/(g K) (Eq 28). */
void CKSMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sms)
{
    static const double RbyW[3] = {
        8.297452926019980e+05, /* R/W for NC7H16 */
        2.598381814318037e+06, /* R/W for O2 */
        2.968047434442088e+06  /* R/W for N2 */
    };
    double t = *T;
    double tc[5] = { log(t), t, t*t, t*t*t, t*t*t*t }; /* ln(T) + powers of T */
    speciesEntropy(sms, tc);
    for (int k = 0; k < 3; ++k) {
        sms[k] *= RbyW[k];
    }
}
/* Mixture mean molar Cp (Eq 33), erg/(mol K): mole-fraction-weighted. */
void CKCPBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl)
{
    double t = *T;
    double tc[5] = { 0.0, t, t*t, t*t*t, t*t*t*t }; /* powers of T for the fits */
    double cpor[3]; /* nondimensional Cp/R per species */
    cp_R(cpor, tc);
    double acc = 0.0;
    for (int k = 0; k < 3; ++k) {
        acc += x[k]*cpor[k]; /* mole-fraction weighting */
    }
    *cpbl = acc * 8.31451e+07;
}
/*Returns the mean specific heat at CP (Eq. 34) */
void CKCPBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double cpor[3];                                /* cp/R per species */
    cp_R(cpor, tc);
    /* mass-fraction weighted sum: cp_k/R * y_k / W_k */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += cpor[k]*y[k]*imw[k];
    }
    *cpbs = sum * 8.31451e+07;                     /* multiply by R */
}
/*Returns the mean specific heat at CV (Eq. 35) */
void CKCVBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cvbl)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double cvor[3];                                /* cv/R per species */
    cv_R(cvor, tc);
    /* mole-fraction weighted sum of cv/R */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += x[k]*cvor[k];
    }
    *cvbl = sum * 8.31451e+07;                     /* multiply by R */
}
/*Returns the mean specific heat at CV (Eq. 36) */
void CKCVBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cvbs)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double cvor[3];                                /* cv/R per species */
    cv_R(cvor, tc);
    /* mass-fraction weighted sum: cv_k/R * y_k / W_k */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += cvor[k]*y[k]*imw[k];
    }
    *cvbs = sum * 8.31451e+07;                     /* multiply by R */
}
/*Returns the mean enthalpy of the mixture in molar units */
void CKHBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict hbml)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double hml[3];                                 /* h/RT per species */
    speciesEnthalpy(hml, tc);
    /* mole-fraction weighted sum of h/RT */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += x[k]*hml[k];
    }
    *hbml = sum * (8.31451e+07*t);                 /* scale by R*T */
}
/*Returns mean enthalpy of mixture in mass units */
void CKHBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict hbms)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double hml[3];                                 /* h/RT per species */
    speciesEnthalpy(hml, tc);
    /* mass-fraction weighted sum: y_k * h_k/RT / W_k */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += y[k]*hml[k]*imw[k];
    }
    *hbms = sum * (8.31451e+07*t);                 /* scale by R*T */
}
/*get mean internal energy in molar units */
void CKUBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict ubml)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double uml[3];                                 /* e/RT per species */
    speciesInternalEnergy(uml, tc);
    /* mole-fraction weighted sum of e/RT */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += x[k]*uml[k];
    }
    *ubml = sum * (8.31451e+07*t);                 /* scale by R*T */
}
/*get mean internal energy in mass units */
void CKUBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict ubms)
{
    const double t = *T;
    double tc[] = { 0, t, t*t, t*t*t, t*t*t*t };  /* temperature cache */
    double ums[3];                                 /* e/RT per species */
    speciesInternalEnergy(ums, tc);
    /* mass-fraction weighted sum: y_k * e_k/RT / W_k */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += y[k]*ums[k]*imw[k];
    }
    *ubms = sum * (8.31451e+07*t);                 /* scale by R*T */
}
/*get mixture entropy in molar units */
void CKSBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict sbml)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    /* ln of pressure normalized by 1 atm in cgs (dynes/cm^2) */
    const double logPratio = log ( *P / 1013250.0 );
    double sor[3];              /* s/R per species */
    speciesEntropy(sor, tc);
    /* Eq 42; the 1e-100 guards log() against zero mole fractions */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += x[k]*(sor[k]-log((x[k]+1e-100))-logPratio);
    }
    *sbml = sum * 8.31451e+07;  /* multiply by R */
}
/*get mixture entropy in mass units */
void CKSBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict sbms)
{
    static const double wt[3] = { 100.205570, 31.998800, 28.013400 }; /* NC7H16, O2, N2 */
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    /* ln of pressure normalized by 1 atm in cgs (dynes/cm^2) */
    const double logPratio = log ( *P / 1013250.0 );
    /* inverse mean molecular weight (Eq 4, 6) */
    double YOW = 0;
    YOW += y[0]*imw[0]; /*NC7H16 */
    YOW += y[1]*imw[1]; /*O2 */
    YOW += y[2]*imw[2]; /*N2 */
    double sor[3];              /* s/R per species */
    speciesEntropy(sor, tc);
    /* convert mass to mole fractions on the fly, then apply Eq 42/43 */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        double xk = y[k]/(wt[k]*YOW);
        sum += xk*(sor[k]-log((xk+1e-100))-logPratio);
    }
    /* scale by R/W */
    *sbms = sum * 8.31451e+07 * YOW;
}
/*Returns mean gibbs free energy in molar units */
void CKGBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict gbml)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    /* ln of pressure normalized by 1 atm in cgs (dynes/cm^2) */
    const double logPratio = log ( *P / 1013250.0 );
    const double RT = 8.31451e+07*t;
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    /* Eq 44; the 1e-100 guards log() against zero mole fractions */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += x[k]*(gort[k]+log((x[k]+1e-100))+logPratio);
    }
    *gbml = sum * RT;
}
/*Returns mixture gibbs free energy in mass units */
void CKGBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict gbms)
{
    static const double wt[3] = { 100.205570, 31.998800, 28.013400 }; /* NC7H16, O2, N2 */
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    /* ln of pressure normalized by 1 atm in cgs (dynes/cm^2) */
    const double logPratio = log ( *P / 1013250.0 );
    const double RT = 8.31451e+07*t;
    /* inverse mean molecular weight */
    double YOW = 0;
    YOW += y[0]*imw[0]; /*NC7H16 */
    YOW += y[1]*imw[1]; /*O2 */
    YOW += y[2]*imw[2]; /*N2 */
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    /* convert mass to mole fractions on the fly, then apply Eq 44 */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        double xk = y[k]/(wt[k]*YOW);
        sum += xk*(gort[k]+log((xk+1e-100))+logPratio);
    }
    /* scale by RT/W */
    *gbms = sum * RT * YOW;
}
/*Returns mean helmholtz free energy in molar units */
void CKABML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict abml)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    /* ln of pressure normalized by 1 atm in cgs (dynes/cm^2) */
    const double logPratio = log ( *P / 1013250.0 );
    const double RT = 8.31451e+07*t;
    double aort[3];             /* a/RT per species */
    helmholtz(aort, tc);
    /* Eq 44 form; the 1e-100 guards log() against zero mole fractions */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        sum += x[k]*(aort[k]+log((x[k]+1e-100))+logPratio);
    }
    *abml = sum * RT;
}
/*Returns mixture helmholtz free energy in mass units */
void CKABMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict abms)
{
    static const double wt[3] = { 100.205570, 31.998800, 28.013400 }; /* NC7H16, O2, N2 */
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    /* ln of pressure normalized by 1 atm in cgs (dynes/cm^2) */
    const double logPratio = log ( *P / 1013250.0 );
    const double RT = 8.31451e+07*t;
    /* inverse mean molecular weight */
    double YOW = 0;
    YOW += y[0]*imw[0]; /*NC7H16 */
    YOW += y[1]*imw[1]; /*O2 */
    YOW += y[2]*imw[2]; /*N2 */
    double aort[3];             /* a/RT per species */
    helmholtz(aort, tc);
    /* convert mass to mole fractions on the fly, then apply Eq 44 form */
    double sum = 0;
    for (int k = 0; k < 3; k++) {
        double xk = y[k]/(wt[k]*YOW);
        sum += xk*(aort[k]+log((xk+1e-100))+logPratio);
    }
    /* scale by RT/W */
    *abms = sum * RT * YOW;
}
/*compute the production rate for each species */
void CKWC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    /* scale concentrations to SI for the kernel */
    for (int k = 0; k < 3; k++) {
        C[k] *= 1.0e6;
    }
    productionRate(wdot, C, *T);
    /* restore C and return wdot in chemkin (CGS) units */
    for (int k = 0; k < 3; k++) {
        C[k] *= 1.0e-6;
        wdot[k] *= 1.0e-6;
    }
}
/*Returns the molar production rate of species */
/*Given P, T, and mass fractions */
void CKWYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    double c[3];
    /* inverse mean molecular weight (see Eq. 7) */
    double YOW = 0;
    for (int k = 0; k < 3; k++) {
        YOW += y[k]*imw[k];
    }
    /* P*W/(R*T), then a 1e6 factor so c comes out in SI */
    double PWORT = (*P)/(YOW * 8.31451e+07 * (*T));
    PWORT *= 1e6;
    for (int k = 0; k < 3; k++) {
        c[k] = PWORT * y[k]*imw[k];
    }
    productionRate(wdot, c, *T);
    /* back to chemkin (CGS) units */
    for (int k = 0; k < 3; k++) {
        wdot[k] *= 1.0e-6;
    }
}
/*Returns the molar production rate of species */
/*Given P, T, and mole fractions */
void CKWXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    double c[3];
    /* 1e6 * P/RT converts mole fractions to SI concentrations (Eq 10) */
    const double PORT = 1e6 * (*P)/(8.31451e+07 * (*T));
    for (int k = 0; k < 3; k++) {
        c[k] = x[k]*PORT;
    }
    productionRate(wdot, c, *T);
    /* back to chemkin (CGS) units */
    for (int k = 0; k < 3; k++) {
        wdot[k] *= 1.0e-6;
    }
}
/*Returns the molar production rate of species */
/*Given rho, T, and mass fractions */
void CKWYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    double c[3];
    /* Eq 8, with an extra 1e6 factor so c is in SI */
    for (int k = 0; k < 3; k++) {
        c[k] = 1e6 * (*rho) * y[k]*imw[k];
    }
    productionRate(wdot, c, *T);
    /* back to chemkin (CGS) units */
    for (int k = 0; k < 3; k++) {
        wdot[k] *= 1.0e-6;
    }
}
/*Returns the molar production rate of species */
/*Given rho, T, and mass fractions, for np points at once */
void VCKWYR(int * restrict np, double * restrict rho, double * restrict T,
            double * restrict y, int * restrict iwrk, double * restrict rwrk,
            double * restrict wdot)
{
    const int npoints = *np;
    double c[3*npoints];    /* VLA scratch: SI concentrations, species-major layout */
    /* Eq 8, with an extra 1e6 factor so c is in SI */
    for (int n = 0; n < 3; n++) {
        for (int i = 0; i < npoints; i++) {
            c[n*npoints+i] = 1.0e6 * rho[i] * y[n*npoints+i] * imw[n];
        }
    }
    vproductionRate(npoints, wdot, c, T);
    /* back to chemkin (CGS) units */
    for (int i = 0; i < 3*npoints; i++) {
        wdot[i] *= 1.0e-6;
    }
}
/*Returns the molar production rate of species */
/*Given rho, T, and mole fractions */
void CKWXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    static const double wt[3] = { 100.205570, 31.998800, 28.013400 }; /* NC7H16, O2, N2 */
    double c[3];
    /* mean molecular weight (see Eq 4, 11) */
    double XW = 0;
    for (int k = 0; k < 3; k++) {
        XW += x[k]*wt[k];
    }
    /* extra 1e6 factor takes c to SI */
    const double ROW = 1e6*(*rho) / XW;
    /* conversion per Eq 11 */
    for (int k = 0; k < 3; k++) {
        c[k] = x[k]*ROW;
    }
    productionRate(wdot, c, *T);
    /* back to chemkin (CGS) units */
    for (int k = 0; k < 3; k++) {
        wdot[k] *= 1.0e-6;
    }
}
/*Returns the rate of progress for each reaction */
void CKQC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    /* scale concentrations to SI for the kernel */
    for (int k = 0; k < 3; k++) {
        C[k] *= 1.0e6;
    }
    progressRate(qdot, C, *T);
    /* restore C */
    for (int k = 0; k < 3; k++) {
        C[k] *= 1.0e-6;
    }
    /* mechanism has 0 reactions: loop is empty, qdot untouched */
    for (int k = 0; k < 0; k++) {
        qdot[k] *= 1.0e-6;
    }
}
/*Returns the progress rates of each reactions */
/*Given P, T, and mole fractions */
void CKKFKR(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict q_f, double * restrict q_r)
{
    double c[3];
    /* 1e6 * P/RT converts mole fractions to SI concentrations (Eq 10) */
    const double PORT = 1e6 * (*P)/(8.31451e+07 * (*T));
    for (int k = 0; k < 3; k++) {
        c[k] = x[k]*PORT;
    }
    progressRateFR(q_f, q_r, c, *T);
    /* mechanism has 0 reactions: loop is empty, q_f/q_r untouched */
    for (int k = 0; k < 0; k++) {
        q_f[k] *= 1.0e-6;
        q_r[k] *= 1.0e-6;
    }
}
/*Returns the progress rates of each reactions */
/*Given P, T, and mass fractions */
void CKQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    double c[3];
    /* inverse mean molecular weight (see Eq. 7) */
    double YOW = 0;
    for (int k = 0; k < 3; k++) {
        YOW += y[k]*imw[k];
    }
    /* P*W/(R*T), then a 1e6 factor so c comes out in SI */
    double PWORT = (*P)/(YOW * 8.31451e+07 * (*T));
    PWORT *= 1e6;
    for (int k = 0; k < 3; k++) {
        c[k] = PWORT * y[k]*imw[k];
    }
    progressRate(qdot, c, *T);
    /* mechanism has 0 reactions: loop is empty, qdot untouched */
    for (int k = 0; k < 0; k++) {
        qdot[k] *= 1.0e-6;
    }
}
/*Returns the progress rates of each reactions */
/*Given P, T, and mole fractions */
void CKQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    double c[3];
    /* 1e6 * P/RT converts mole fractions to SI concentrations (Eq 10) */
    const double PORT = 1e6 * (*P)/(8.31451e+07 * (*T));
    for (int k = 0; k < 3; k++) {
        c[k] = x[k]*PORT;
    }
    progressRate(qdot, c, *T);
    /* mechanism has 0 reactions: loop is empty, qdot untouched */
    for (int k = 0; k < 0; k++) {
        qdot[k] *= 1.0e-6;
    }
}
/*Returns the progress rates of each reactions */
/*Given rho, T, and mass fractions */
void CKQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    double c[3];
    /* Eq 8, with an extra 1e6 factor so c is in SI */
    for (int k = 0; k < 3; k++) {
        c[k] = 1e6 * (*rho) * y[k]*imw[k];
    }
    progressRate(qdot, c, *T);
    /* mechanism has 0 reactions: loop is empty, qdot untouched */
    for (int k = 0; k < 0; k++) {
        qdot[k] *= 1.0e-6;
    }
}
/*Returns the progress rates of each reactions */
/*Given rho, T, and mole fractions */
void CKQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    static const double wt[3] = { 100.205570, 31.998800, 28.013400 }; /* NC7H16, O2, N2 */
    double c[3];
    /* mean molecular weight (see Eq 4, 11) */
    double XW = 0;
    for (int k = 0; k < 3; k++) {
        XW += x[k]*wt[k];
    }
    /* extra 1e6 factor takes c to SI */
    const double ROW = 1e6*(*rho) / XW;
    /* conversion per Eq 11 */
    for (int k = 0; k < 3; k++) {
        c[k] = x[k]*ROW;
    }
    progressRate(qdot, c, *T);
    /* mechanism has 0 reactions: loop is empty, qdot untouched */
    for (int k = 0; k < 0; k++) {
        qdot[k] *= 1.0e-6;
    }
}
/*Returns the stoichiometric coefficients */
/*of the reaction mechanism. (Eq 50) */
void CKNU(int * kdim, int * iwrk, double * restrict rwrk, int * nuki)
{
    const int kd = *kdim;
    /* this mechanism has no reactions: all coefficients are zero */
    for (int i = 0; i < 3 * kd; ++i) {
        nuki[i] = 0;
    }
}
/*Returns the elemental composition */
/*of the speciesi (mdim is num of elements) */
void CKNCF(int * mdim, int * iwrk, double * restrict rwrk, int * ncf)
{
    const int kd = *mdim;   /* row stride: one row of kd element slots per species */
    /* start from a clean slate */
    for (int i = 0; i < kd * 3; ++i) {
        ncf[i] = 0;
    }
    /* species 0: NC7H16 = C7 H16 */
    ncf[ 0 * kd + 0 ] = 7; /*C */
    ncf[ 0 * kd + 1 ] = 16; /*H */
    /* species 1: O2 */
    ncf[ 1 * kd + 2 ] = 2; /*O */
    /* species 2: N2 */
    ncf[ 2 * kd + 3 ] = 2; /*N */
}
/*Returns the arrehenius coefficients */
/*for all reactions */
void CKABE(int * iwrk, double * restrict rwrk, double * restrict a, double * restrict b, double * restrict e)
{
    /* mechanism has 0 reactions: output arrays are left untouched */
    for (int r = 0; r < 0; ++r) {
        a[r] = fwd_A[r];
        b[r] = fwd_beta[r];
        e[r] = fwd_Ea[r];
    }
}
/*Returns the equil constants for each reaction */
void CKEQC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict eqcon)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    equilibriumConstants(eqcon, gort, t);
}
/*Returns the equil constants for each reaction */
/*Given P, T, and mass fractions */
void CKEQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    equilibriumConstants(eqcon, gort, t);
}
/*Returns the equil constants for each reaction */
/*Given P, T, and mole fractions */
void CKEQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    equilibriumConstants(eqcon, gort, t);
}
/*Returns the equil constants for each reaction */
/*Given rho, T, and mass fractions */
void CKEQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    equilibriumConstants(eqcon, gort, t);
}
/*Returns the equil constants for each reaction */
/*Given rho, T, and mole fractions */
void CKEQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon)
{
    const double t = *T;
    double tc[5];               /* cache of log(T), T, T^2, T^3, T^4 */
    tc[0] = log(t);
    tc[1] = t;
    tc[2] = t*t;
    tc[3] = tc[2]*t;
    tc[4] = tc[3]*t;
    double gort[3];             /* g/RT per species */
    gibbs(gort, tc);
    equilibriumConstants(eqcon, gort, t);
}
/* Cache of the last temperature at which the rate/equilibrium caches below
   were evaluated; -1 forces a refresh on first use.  Thread-private under
   OpenMP so each thread keeps its own cache. */
static double T_save = -1;
#ifdef _OPENMP
#pragma omp threadprivate(T_save)
#endif
/* Cached forward rate constants, one entry per reaction.  This mechanism has
   0 reactions, so the array is zero-length — a GNU C extension, not ISO C;
   NOTE(review): confirm portability if building with a non-GNU compiler. */
static double k_f_save[0];
#ifdef _OPENMP
#pragma omp threadprivate(k_f_save)
#endif
/* Cached equilibrium constants, one entry per reaction (also zero-length). */
static double Kc_save[0];
#ifdef _OPENMP
#pragma omp threadprivate(Kc_save)
#endif
/*compute the production rate for each species */
/*This mechanism has 3 species and 0 reactions, so wdot is identically zero. */
void productionRate(double * restrict wdot, double * restrict sc, double T)
{
double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */
double invT = 1.0 / tc[1];
/*refresh the thread-private rate caches only when T changes */
if (T != T_save)
{
T_save = T;
comp_k_f(tc,invT,k_f_save);
comp_Kc(tc,invT,Kc_save);
}
/*NOTE(review): q_f/q_r are zero-length arrays (GNU extension, not ISO C) */
double qdot, q_f[0], q_r[0];
comp_qfqr(q_f, q_r, sc, tc, invT);
/*no reactions: all species production rates are zero */
for (int i = 0; i < 3; ++i) {
wdot[i] = 0.0;
}
return;
}
/* Fill the forward rate-constant cache k_f (Arrhenius form).  With 0
   reactions in this mechanism the loop body never executes. */
void comp_k_f(double * restrict tc, double invT, double * restrict k_f)
{
#ifdef __INTEL_COMPILER
    #pragma simd
#endif
    for (int r = 0; r < 0; ++r) {
        k_f[r] = prefactor_units[r] * fwd_A[r]
                 * exp(fwd_beta[r] * tc[0] - activation_units[r] * fwd_Ea[r] * invT);
    }
}
/*Fill the equilibrium-constant cache Kc (empty: mechanism has 0 reactions) */
void comp_Kc(double * restrict tc, double invT, double * restrict Kc)
{
/*compute the Gibbs free energy */
double g_RT[3];
gibbs(g_RT, tc);
#ifdef __INTEL_COMPILER
#pragma simd
#endif
/*loop bound is 0: the body (which would read uninitialized Kc) never runs */
for (int i=0; i<0; ++i) {
Kc[i] = exp(Kc[i]);
};
/*reference concentration: P_atm / (RT) in inverse mol/m^3 */
/*refC/refCinv are computed but unused here (generated-code artifact) */
double refC = 101325 / 8.31451 * invT;
double refCinv = 1 / refC;
return;
}
/*Compute forward/reverse progress rates (empty: mechanism has 0 reactions) */
void comp_qfqr(double * restrict qf, double * restrict qr, double * restrict sc, double * restrict tc, double invT)
{
double T = tc[1];
/*compute the mixture concentration */
double mixture = 0.0;
for (int i = 0; i < 3; ++i) {
mixture += sc[i];
}
/*NOTE(review): Corr is a zero-length array (GNU extension, not ISO C) */
double Corr[0];
for (int i = 0; i < 0; ++i) {
Corr[i] = 1.0;
}
/*loop bound is 0: the body (which would scale uninitialized qf/qr) never runs */
for (int i=0; i<0; i++)
{
qf[i] *= Corr[i] * k_f_save[i];
qr[i] *= Corr[i] * k_f_save[i] / Kc_save[i];
}
return;
}
/*compute the production rate for each species */
/*Vector version over npt points; with 0 reactions every wdot entry is zeroed. */
void vproductionRate(int npt, double * restrict wdot, double * restrict sc, double * restrict T)
{
/*NOTE(review): the 0*npt-sized VLAs are a GNU extension, not ISO C */
double k_f_s[0*npt], Kc_s[0*npt], mixture[npt], g_RT[3*npt];
double tc[5*npt], invT[npt];
#ifdef __INTEL_COMPILER
#pragma simd
#endif
/*fill the strided temperature cache: log(T), T, T^2, T^3, T^4 per point */
for (int i=0; i<npt; i++) {
tc[0*npt+i] = log(T[i]);
tc[1*npt+i] = T[i];
tc[2*npt+i] = T[i]*T[i];
tc[3*npt+i] = T[i]*T[i]*T[i];
tc[4*npt+i] = T[i]*T[i]*T[i]*T[i];
invT[i] = 1.0 / T[i];
}
for (int i=0; i<npt; i++) {
mixture[i] = 0.0;
}
/*accumulate the mixture concentration and zero the production rates */
for (int n=0; n<3; n++) {
for (int i=0; i<npt; i++) {
mixture[i] += sc[n*npt+i];
wdot[n*npt+i] = 0.0;
}
}
vcomp_k_f(npt, k_f_s, tc, invT);
vcomp_gibbs(npt, g_RT, tc);
vcomp_Kc(npt, Kc_s, g_RT, invT);
vcomp_wdot(npt, wdot, mixture, sc, k_f_s, Kc_s, tc, invT, T);
}
/* Vector fill of the forward rate-constant cache.  This mechanism has no
   reactions, so there is nothing to compute per point. */
void vcomp_k_f(int npt, double * restrict k_f_s, double * restrict tc, double * restrict invT)
{
#ifdef __INTEL_COMPILER
    #pragma simd
#endif
    for (int p = 0; p < npt; p++) {
    }
}
/* Vector evaluation of g/RT: gather each point's powers of T from the
   strided cache, call the scalar gibbs() fit, scatter results back. */
void vcomp_gibbs(int npt, double * restrict g_RT, double * restrict tc)
{
    for (int i = 0; i < npt; i++) {
        double tg[5], g[3];
        for (int n = 0; n < 5; n++) {
            tg[n] = tc[n*npt+i];    /* gather this point's temperature cache */
        }
        gibbs(g, tg);
        for (int n = 0; n < 3; n++) {
            g_RT[n*npt+i] = g[n];   /* scatter back, species-major */
        }
    }
}
/* Vector fill of the equilibrium-constant cache.  With no reactions in the
   mechanism, only the (unused) reference concentration is formed per point. */
void vcomp_Kc(int npt, double * restrict Kc_s, double * restrict g_RT, double * restrict invT)
{
#ifdef __INTEL_COMPILER
    #pragma simd
#endif
    for (int i = 0; i < npt; i++) {
        /* reference concentration: P_atm / (RT) in inverse mol/m^3 */
        double refC = (101325. / 8.31451) * invT[i];
        double refCinv = 1.0 / refC;
        (void)refCinv;  /* nothing consumes it: 0 reactions */
    }
}
/* Vector accumulation of species production rates.  This mechanism has no
   reactions, so wdot (already zeroed by the caller) is left unchanged. */
void vcomp_wdot(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc,
                double * restrict k_f_s, double * restrict Kc_s,
                double * restrict tc, double * restrict invT, double * restrict T)
{
#ifdef __INTEL_COMPILER
    #pragma simd
#endif
    for (int i = 0; i < npt; i++) {
    }
}
/*compute the reaction Jacobian */
/*J is a 4x4 row-strided matrix: d(wdot, Tdot)/d([X], T) */
void DWDOT(double * restrict J, double * restrict sc, double * restrict Tp, int * consP)
{
    double c[3];
    /* concentrations to SI for the Jacobian kernel */
    for (int k = 0; k < 3; k++) {
        c[k] = 1.e6 * sc[k];
    }
    aJacobian(J, c, *Tp, *consP);
    /* rescale mixed-unit entries back to chemkin conventions:
       dwdot[k]/dT row and dTdot/d[X] column */
    for (int k = 0; k < 3; k++) {
        J[12+k] *= 1.e-6;
        J[k*4+3] *= 1.e6;
    }
}
/*compute the reaction Jacobian */
/*J is a dense 4x4 matrix (row stride 4): d(wdot[0..2], Tdot)/d([X]_0..2, T).
  With 0 reactions only the temperature row/column terms are populated.
  consP selects constant-pressure (cp, h) vs constant-volume (cv, e) forms. */
void aJacobian(double * restrict J, double * restrict sc, double T, int consP)
{
/*zero the full 4x4 Jacobian */
for (int i=0; i<16; i++) {
J[i] = 0.0;
}
double wdot[3];
for (int k=0; k<3; k++) {
wdot[k] = 0.0;
}
double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */
double invT = 1.0 / tc[1];
double invT2 = invT * invT;
/*reference concentration: P_atm / (RT) in inverse mol/m^3 */
double refC = 101325 / 8.31451 / T;
double refCinv = 1.0 / refC;
/*compute the mixture concentration */
double mixture = 0.0;
for (int k = 0; k < 3; ++k) {
mixture += sc[k];
}
/*compute the Gibbs free energy */
double g_RT[3];
gibbs(g_RT, tc);
/*compute the species enthalpy */
double h_RT[3];
speciesEnthalpy(h_RT, tc);
/*per-reaction scratch variables; all unused since this mechanism has 0 reactions */
double phi_f, k_f, k_r, phi_r, Kc, q, q_nocor, Corr, alpha;
double dlnkfdT, dlnk0dT, dlnKcdT, dkrdT, dqdT;
double dqdci, dcdc_fac, dqdc[3];
double Pr, fPr, F, k_0, logPr;
double logFcent, troe_c, troe_n, troePr_den, troePr, troe;
double Fcent1, Fcent2, Fcent3, Fcent;
double dlogFdc, dlogFdn, dlogFdcn_fac;
double dlogPrdT, dlogfPrdT, dlogFdT, dlogFcentdT, dlogFdlogPr, dlnCorrdT;
const double ln10 = log(10.0);
const double log10e = 1.0/log(10.0);
/*pick energy/heat-capacity forms according to the constant-pressure flag */
double c_R[3], dcRdT[3], e_RT[3];
double * eh_RT;
if (consP) {
cp_R(c_R, tc);
dcvpRdT(dcRdT, tc);
eh_RT = &h_RT[0];
}
else {
cv_R(c_R, tc);
dcvpRdT(dcRdT, tc);
speciesInternalEnergy(e_RT, tc);
eh_RT = &e_RT[0];
}
/*mixture heat capacity / energy sums and their temperature derivatives */
double cmix = 0.0, ehmix = 0.0, dcmixdT=0.0, dehmixdT=0.0;
for (int k = 0; k < 3; ++k) {
cmix += c_R[k]*sc[k];
dcmixdT += dcRdT[k]*sc[k];
ehmix += eh_RT[k]*wdot[k];
dehmixdT += invT*(c_R[k]-eh_RT[k])*wdot[k] + eh_RT[k]*J[12+k];
}
double cmixinv = 1.0/cmix;
double tmp1 = ehmix*cmixinv;
double tmp3 = cmixinv*T;
double tmp2 = tmp1*tmp3;
double dehmixdc;
/* dTdot/d[X] */
for (int k = 0; k < 3; ++k) {
dehmixdc = 0.0;
for (int m = 0; m < 3; ++m) {
dehmixdc += eh_RT[m]*J[k*4+m];
}
J[k*4+3] = tmp2*c_R[k] - tmp3*dehmixdc;
}
/* dTdot/dT */
J[15] = -tmp1 + tmp2*dcmixdT - tmp3*dehmixdT;
}
/*compute d(Cp/R)/dT and d(Cv/R)/dT at the given temperature */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/*Each species uses a piecewise polynomial fit with its own midpoint
  temperature; the two derivatives coincide since Cp/R - Cv/R = 1. */
void dcvpRdT(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
+1.12748635e-03
-1.15123009e-06 * tc[1]
+3.94163169e-09 * tc[2]
-3.50742157e-12 * tc[3];
/*species 2: N2 */
species[2] =
+1.40824000e-03
-7.92644400e-06 * tc[1]
+1.69245450e-08 * tc[2]
-9.77942000e-12 * tc[3];
} else {
/*species 1: O2 */
species[1] =
+6.13519689e-04
-2.51768398e-07 * tc[1]
+5.32584444e-11 * tc[2]
-4.54574124e-15 * tc[3];
/*species 2: N2 */
species[2] =
+1.48797700e-03
-1.13695220e-06 * tc[1]
+3.02911200e-10 * tc[2]
-2.70134040e-14 * tc[3];
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
+8.54355820e-02
-1.05069357e-04 * tc[1]
+4.88837163e-08 * tc[2]
-8.09579700e-12 * tc[3];
} else {
/*species 0: NC7H16 */
species[0] =
+3.47675750e-02
-2.36814258e-05 * tc[1]
+5.49895434e-09 * tc[2]
-4.24521064e-13 * tc[3];
}
return;
}
/*compute the progress rate for each reaction */
/*This mechanism has 0 reactions, so qdot is never written. */
void progressRate(double * restrict qdot, double * restrict sc, double T)
{
double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */
double invT = 1.0 / tc[1];
/*refresh the thread-private rate caches only when T changes */
if (T != T_save)
{
T_save = T;
comp_k_f(tc,invT,k_f_save);
comp_Kc(tc,invT,Kc_save);
}
/*NOTE(review): q_f/q_r are zero-length arrays (GNU extension, not ISO C) */
double q_f[0], q_r[0];
comp_qfqr(q_f, q_r, sc, tc, invT);
/*loop bound is 0: no progress rates to assemble */
for (int i = 0; i < 0; ++i) {
qdot[i] = q_f[i] - q_r[i];
}
return;
}
/*compute the progress rate for each reaction */
/*Forward and reverse rates reported separately; with 0 reactions in this
  mechanism q_f and q_r are never written. */
void progressRateFR(double * restrict q_f, double * restrict q_r, double * restrict sc, double T)
{
double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */
double invT = 1.0 / tc[1];
/*refresh the thread-private rate caches only when T changes */
if (T != T_save)
{
T_save = T;
comp_k_f(tc,invT,k_f_save);
comp_Kc(tc,invT,Kc_save);
}
comp_qfqr(q_f, q_r, sc, tc, invT);
return;
}
/*compute the equilibrium constants for each reaction */
/*This mechanism contains no reactions, so there are no equilibrium
  constants to compute and kc is left untouched.  The dead computation of
  the reference concentration P_atm/(R*T) (which a non-empty mechanism
  would use, and which divided by T for no effect) has been removed; the
  parameters are explicitly voided to avoid unused-parameter warnings. */
void equilibriumConstants(double * restrict kc, double * restrict g_RT, double T)
{
    (void)kc;
    (void)g_RT;
    (void)T;
    return;
}
/*compute the g/(RT) at the given temperature */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/*Piecewise polynomial fits with two temperature ranges per species;
  O2 and N2 switch at 1000 K, NC7H16 at 1391 K. */
void gibbs(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
double invT = 1 / T;
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
-1.005249020000000e+03 * invT
-2.821801190000000e+00
-3.212936400000000e+00 * tc[0]
-5.637431750000000e-04 * tc[1]
+9.593584116666666e-08 * tc[2]
-1.094897691666667e-10 * tc[3]
+4.384276960000000e-14 * tc[4];
/*species 2: N2 */
species[2] =
-1.020900000000000e+03 * invT
-6.516950000000001e-01
-3.298677000000000e+00 * tc[0]
-7.041200000000000e-04 * tc[1]
+6.605369999999999e-07 * tc[2]
-4.701262500000001e-10 * tc[3]
+1.222427500000000e-13 * tc[4];
} else {
/*species 1: O2 */
species[1] =
-1.233930180000000e+03 * invT
+5.084126000000002e-01
-3.697578190000000e+00 * tc[0]
-3.067598445000000e-04 * tc[1]
+2.098069983333333e-08 * tc[2]
-1.479401233333333e-12 * tc[3]
+5.682176550000000e-17 * tc[4];
/*species 2: N2 */
species[2] =
-9.227977000000000e+02 * invT
-3.053888000000000e+00
-2.926640000000000e+00 * tc[0]
-7.439885000000000e-04 * tc[1]
+9.474601666666666e-08 * tc[2]
-8.414199999999999e-12 * tc[3]
+3.376675500000000e-16 * tc[4];
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-2.565865650000000e+04 * invT
-3.664165307000000e+01
+1.268361870000000e+00 * tc[0]
-4.271779100000000e-02 * tc[1]
+8.755779766666667e-06 * tc[2]
-1.357881008333333e-09 * tc[3]
+1.011974625000000e-13 * tc[4];
} else {
/*species 0: NC7H16 */
species[0] =
-3.427600810000000e+04 * invT
+1.145189165000000e+02
-2.221489690000000e+01 * tc[0]
-1.738378750000000e-02 * tc[1]
+1.973452150000000e-06 * tc[2]
-1.527487316666667e-10 * tc[3]
+5.306513300000000e-15 * tc[4];
}
return;
}
/*compute the a/(RT) at the given temperature */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/*Same piecewise fit structure as gibbs(); the constant term differs by 1
  (a = g - RT), which is why each second coefficient is offset by one. */
void helmholtz(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
double invT = 1 / T;
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
-1.00524902e+03 * invT
-3.82180119e+00
-3.21293640e+00 * tc[0]
-5.63743175e-04 * tc[1]
+9.59358412e-08 * tc[2]
-1.09489769e-10 * tc[3]
+4.38427696e-14 * tc[4];
/*species 2: N2 */
species[2] =
-1.02090000e+03 * invT
-1.65169500e+00
-3.29867700e+00 * tc[0]
-7.04120000e-04 * tc[1]
+6.60537000e-07 * tc[2]
-4.70126250e-10 * tc[3]
+1.22242750e-13 * tc[4];
} else {
/*species 1: O2 */
species[1] =
-1.23393018e+03 * invT
-4.91587400e-01
-3.69757819e+00 * tc[0]
-3.06759845e-04 * tc[1]
+2.09806998e-08 * tc[2]
-1.47940123e-12 * tc[3]
+5.68217655e-17 * tc[4];
/*species 2: N2 */
species[2] =
-9.22797700e+02 * invT
-4.05388800e+00
-2.92664000e+00 * tc[0]
-7.43988500e-04 * tc[1]
+9.47460167e-08 * tc[2]
-8.41420000e-12 * tc[3]
+3.37667550e-16 * tc[4];
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-2.56586565e+04 * invT
-3.76416531e+01
+1.26836187e+00 * tc[0]
-4.27177910e-02 * tc[1]
+8.75577977e-06 * tc[2]
-1.35788101e-09 * tc[3]
+1.01197462e-13 * tc[4];
} else {
/*species 0: NC7H16 */
species[0] =
-3.42760081e+04 * invT
+1.13518917e+02
-2.22148969e+01 * tc[0]
-1.73837875e-02 * tc[1]
+1.97345215e-06 * tc[2]
-1.52748732e-10 * tc[3]
+5.30651330e-15 * tc[4];
}
return;
}
/*compute Cv/R at the given temperature */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/*Same fits as cp_R with the leading coefficient reduced by 1 (Cv = Cp - R). */
void cv_R(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
+2.21293640e+00
+1.12748635e-03 * tc[1]
-5.75615047e-07 * tc[2]
+1.31387723e-09 * tc[3]
-8.76855392e-13 * tc[4];
/*species 2: N2 */
species[2] =
+2.29867700e+00
+1.40824000e-03 * tc[1]
-3.96322200e-06 * tc[2]
+5.64151500e-09 * tc[3]
-2.44485500e-12 * tc[4];
} else {
/*species 1: O2 */
species[1] =
+2.69757819e+00
+6.13519689e-04 * tc[1]
-1.25884199e-07 * tc[2]
+1.77528148e-11 * tc[3]
-1.13643531e-15 * tc[4];
/*species 2: N2 */
species[2] =
+1.92664000e+00
+1.48797700e-03 * tc[1]
-5.68476100e-07 * tc[2]
+1.00970400e-10 * tc[3]
-6.75335100e-15 * tc[4];
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-2.26836187e+00
+8.54355820e-02 * tc[1]
-5.25346786e-05 * tc[2]
+1.62945721e-08 * tc[3]
-2.02394925e-12 * tc[4];
} else {
/*species 0: NC7H16 */
species[0] =
+2.12148969e+01
+3.47675750e-02 * tc[1]
-1.18407129e-05 * tc[2]
+1.83298478e-09 * tc[3]
-1.06130266e-13 * tc[4];
}
return;
}
/*compute Cp/R at the given temperature */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/* Dimensionless constant-pressure heat capacity Cp/R per species,
   Cp/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4 (NASA-7 form).
   Species order: 0 = NC7H16, 1 = O2, 2 = N2; range switch at 1000 K
   (O2/N2) and 1391 K (NC7H16). */
void cp_R(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
+3.21293640e+00
+1.12748635e-03 * tc[1]
-5.75615047e-07 * tc[2]
+1.31387723e-09 * tc[3]
-8.76855392e-13 * tc[4];
/*species 2: N2 */
species[2] =
+3.29867700e+00
+1.40824000e-03 * tc[1]
-3.96322200e-06 * tc[2]
+5.64151500e-09 * tc[3]
-2.44485500e-12 * tc[4];
} else {
/*species 1: O2 */
species[1] =
+3.69757819e+00
+6.13519689e-04 * tc[1]
-1.25884199e-07 * tc[2]
+1.77528148e-11 * tc[3]
-1.13643531e-15 * tc[4];
/*species 2: N2 */
species[2] =
+2.92664000e+00
+1.48797700e-03 * tc[1]
-5.68476100e-07 * tc[2]
+1.00970400e-10 * tc[3]
-6.75335100e-15 * tc[4];
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-1.26836187e+00
+8.54355820e-02 * tc[1]
-5.25346786e-05 * tc[2]
+1.62945721e-08 * tc[3]
-2.02394925e-12 * tc[4];
} else {
/*species 0: NC7H16 */
species[0] =
+2.22148969e+01
+3.47675750e-02 * tc[1]
-1.18407129e-05 * tc[2]
+1.83298478e-09 * tc[3]
-1.06130266e-13 * tc[4];
}
return;
}
/*compute the e/(RT) at the given temperature */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/* Dimensionless internal energy e/(RT) per species from NASA-7 fits:
   e/(RT) = (a1-1) + a2/2*T + a3/3*T^2 + a4/4*T^3 + a5/5*T^4 + a6/T.
   Species order: 0 = NC7H16, 1 = O2, 2 = N2. Identical to speciesEnthalpy
   except the leading coefficient is reduced by one (e = h - RT). */
void speciesInternalEnergy(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
double invT = 1 / T;
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
+2.21293640e+00
+5.63743175e-04 * tc[1]
-1.91871682e-07 * tc[2]
+3.28469308e-10 * tc[3]
-1.75371078e-13 * tc[4]
-1.00524902e+03 * invT;
/*species 2: N2 */
species[2] =
+2.29867700e+00
+7.04120000e-04 * tc[1]
-1.32107400e-06 * tc[2]
+1.41037875e-09 * tc[3]
-4.88971000e-13 * tc[4]
-1.02090000e+03 * invT;
} else {
/*species 1: O2 */
species[1] =
+2.69757819e+00
+3.06759845e-04 * tc[1]
-4.19613997e-08 * tc[2]
+4.43820370e-12 * tc[3]
-2.27287062e-16 * tc[4]
-1.23393018e+03 * invT;
/*species 2: N2 */
species[2] =
+1.92664000e+00
+7.43988500e-04 * tc[1]
-1.89492033e-07 * tc[2]
+2.52426000e-11 * tc[3]
-1.35067020e-15 * tc[4]
-9.22797700e+02 * invT;
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-2.26836187e+00
+4.27177910e-02 * tc[1]
-1.75115595e-05 * tc[2]
+4.07364302e-09 * tc[3]
-4.04789850e-13 * tc[4]
-2.56586565e+04 * invT;
} else {
/*species 0: NC7H16 */
species[0] =
+2.12148969e+01
+1.73837875e-02 * tc[1]
-3.94690430e-06 * tc[2]
+4.58246195e-10 * tc[3]
-2.12260532e-14 * tc[4]
-3.42760081e+04 * invT;
}
return;
}
/*compute the h/(RT) at the given temperature (Eq 20) */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/* Dimensionless enthalpy h/(RT) per species from NASA-7 fits:
   h/(RT) = a1 + a2/2*T + a3/3*T^2 + a4/4*T^3 + a5/5*T^4 + a6/T.
   Species order: 0 = NC7H16, 1 = O2, 2 = N2; range switch at 1000 K
   (O2/N2) and 1391 K (NC7H16). */
void speciesEnthalpy(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
double invT = 1 / T;
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
+3.21293640e+00
+5.63743175e-04 * tc[1]
-1.91871682e-07 * tc[2]
+3.28469308e-10 * tc[3]
-1.75371078e-13 * tc[4]
-1.00524902e+03 * invT;
/*species 2: N2 */
species[2] =
+3.29867700e+00
+7.04120000e-04 * tc[1]
-1.32107400e-06 * tc[2]
+1.41037875e-09 * tc[3]
-4.88971000e-13 * tc[4]
-1.02090000e+03 * invT;
} else {
/*species 1: O2 */
species[1] =
+3.69757819e+00
+3.06759845e-04 * tc[1]
-4.19613997e-08 * tc[2]
+4.43820370e-12 * tc[3]
-2.27287062e-16 * tc[4]
-1.23393018e+03 * invT;
/*species 2: N2 */
species[2] =
+2.92664000e+00
+7.43988500e-04 * tc[1]
-1.89492033e-07 * tc[2]
+2.52426000e-11 * tc[3]
-1.35067020e-15 * tc[4]
-9.22797700e+02 * invT;
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-1.26836187e+00
+4.27177910e-02 * tc[1]
-1.75115595e-05 * tc[2]
+4.07364302e-09 * tc[3]
-4.04789850e-13 * tc[4]
-2.56586565e+04 * invT;
} else {
/*species 0: NC7H16 */
species[0] =
+2.22148969e+01
+1.73837875e-02 * tc[1]
-3.94690430e-06 * tc[2]
+4.58246195e-10 * tc[3]
-2.12260532e-14 * tc[4]
-3.42760081e+04 * invT;
}
return;
}
/*compute the S/R at the given temperature (Eq 21) */
/*tc contains precomputed powers of T, tc[0] = log(T) */
/* Dimensionless entropy S/R per species from NASA-7 fits:
   S/R = a1*ln(T) + a2*T + a3/2*T^2 + a4/3*T^3 + a5/4*T^4 + a7.
   Species order: 0 = NC7H16, 1 = O2, 2 = N2; tc[0] = log(T). */
void speciesEntropy(double * restrict species, double * restrict tc)
{
/*temperature */
double T = tc[1];
/*species with midpoint at T=1000 kelvin */
if (T < 1000) {
/*species 1: O2 */
species[1] =
+3.21293640e+00 * tc[0]
+1.12748635e-03 * tc[1]
-2.87807523e-07 * tc[2]
+4.37959077e-10 * tc[3]
-2.19213848e-13 * tc[4]
+6.03473759e+00 ;
/*species 2: N2 */
species[2] =
+3.29867700e+00 * tc[0]
+1.40824000e-03 * tc[1]
-1.98161100e-06 * tc[2]
+1.88050500e-09 * tc[3]
-6.11213750e-13 * tc[4]
+3.95037200e+00 ;
} else {
/*species 1: O2 */
species[1] =
+3.69757819e+00 * tc[0]
+6.13519689e-04 * tc[1]
-6.29420995e-08 * tc[2]
+5.91760493e-12 * tc[3]
-2.84108828e-16 * tc[4]
+3.18916559e+00 ;
/*species 2: N2 */
species[2] =
+2.92664000e+00 * tc[0]
+1.48797700e-03 * tc[1]
-2.84238050e-07 * tc[2]
+3.36568000e-11 * tc[3]
-1.68833775e-15 * tc[4]
+5.98052800e+00 ;
}
/*species with midpoint at T=1391 kelvin */
if (T < 1391) {
/*species 0: NC7H16 */
species[0] =
-1.26836187e+00 * tc[0]
+8.54355820e-02 * tc[1]
-2.62673393e-05 * tc[2]
+5.43152403e-09 * tc[3]
-5.05987313e-13 * tc[4]
+3.53732912e+01 ;
} else {
/*species 0: NC7H16 */
species[0] =
+2.22148969e+01 * tc[0]
+3.47675750e-02 * tc[1]
-5.92035645e-06 * tc[2]
+6.10994927e-10 * tc[3]
-2.65325665e-14 * tc[4]
-9.23040196e+01 ;
}
return;
}
/*save molecular weights into array */
/* Fill wt[0..2] with the species molecular weights (g/mol),
   in mechanism order: NC7H16, O2, N2. */
void molecularWeight(double * restrict wt)
{
static const double mw[3] = {
100.205570, /* NC7H16 */
31.998800, /* O2 */
28.013400 /* N2 */
};
for (int k = 0; k < 3; ++k) {
wt[k] = mw[k];
}
}
/*save atomic weights into array */
/* Fill awt[0..3] with the element atomic weights (g/mol),
   in element order: C, H, O, N. */
void atomicWeight(double * restrict awt)
{
static const double aw[4] = {
12.011150, /* C */
1.007970, /* H */
15.999400, /* O */
14.006700 /* N */
};
for (int k = 0; k < 4; ++k) {
awt[k] = aw[k];
}
}
/* get temperature given internal energy in mass units and mass fracs */
/* Invert the mixture internal energy e(T) for temperature by Newton
   iteration (dt = (ein - e)/cv). Outside the thermo validity range
   [90 K, 4000 K] the result is linearly extrapolated and *ierr is set
   to 1; on normal convergence *ierr is 0. *t on entry seeds the
   iteration when it lies inside the valid range. */
void GET_T_GIVEN_EY(double * restrict e, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int * ierr)
{
#ifdef CONVERGENCE
const int maxiter = 5000;
const double tol = 1.e-12;
#else
const int maxiter = 200;
const double tol = 1.e-6;
#endif
double ein = *e;
double tmin = 90; /*max lower bound for thermo def */
double tmax = 4000; /*min upper bound for thermo def */
double ecur, emin, emax, cv, tcur, step;
int iter;
/* Energies at the two bounds bracket the search. */
CKUBMS(&tmin, y, iwrk, rwrk, &emin);
CKUBMS(&tmax, y, iwrk, rwrk, &emax);
if (ein < emin) {
/* Below range: linear extrapolation below tmin. */
CKCVBS(&tmin, y, iwrk, rwrk, &cv);
*t = tmin - (emin - ein) / cv;
*ierr = 1;
return;
}
if (ein > emax) {
/* Above range: linear extrapolation above tmax. */
CKCVBS(&tmax, y, iwrk, rwrk, &cv);
*t = tmax - (emax - ein) / cv;
*ierr = 1;
return;
}
/* Seed with caller's guess; otherwise interpolate linearly in energy. */
tcur = *t;
if (tcur < tmin || tcur > tmax) {
tcur = tmin + (tmax - tmin) / (emax - emin) * (ein - emin);
}
for (iter = 0; iter < maxiter; ++iter) {
CKUBMS(&tcur, y, iwrk, rwrk, &ecur);
CKCVBS(&tcur, y, iwrk, rwrk, &cv);
step = (ein - ecur) / cv;
/* Clamp the Newton step to +/-100 K to keep the iteration stable. */
if (step > 100.) { step = 100.; }
else if (step < -100.) { step = -100.; }
else if (fabs(step) < tol) break;
else if (tcur + step == tcur) break; /* step below machine precision */
tcur += step;
}
*t = tcur;
*ierr = 0;
}
/* get temperature given enthalpy in mass units and mass fracs */
/* Invert the mixture enthalpy h(T) for temperature by Newton iteration
   (dt = (hin - h)/cp). Outside the thermo validity range [90 K, 4000 K]
   the result is linearly extrapolated and *ierr is set to 1; on normal
   convergence *ierr is 0. *t on entry seeds the iteration when it lies
   inside the valid range. */
void GET_T_GIVEN_HY(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int * ierr)
{
#ifdef CONVERGENCE
const int maxiter = 5000;
const double tol = 1.e-12;
#else
const int maxiter = 200;
const double tol = 1.e-6;
#endif
double hin = *h;
double tmin = 90; /*max lower bound for thermo def */
double tmax = 4000; /*min upper bound for thermo def */
double hcur, hmin, hmax, cp, tcur, step;
int iter;
/* Enthalpies at the two bounds bracket the search. */
CKHBMS(&tmin, y, iwrk, rwrk, &hmin);
CKHBMS(&tmax, y, iwrk, rwrk, &hmax);
if (hin < hmin) {
/* Below range: linear extrapolation below tmin. */
CKCPBS(&tmin, y, iwrk, rwrk, &cp);
*t = tmin - (hmin - hin) / cp;
*ierr = 1;
return;
}
if (hin > hmax) {
/* Above range: linear extrapolation above tmax. */
CKCPBS(&tmax, y, iwrk, rwrk, &cp);
*t = tmax - (hmax - hin) / cp;
*ierr = 1;
return;
}
/* Seed with caller's guess; otherwise interpolate linearly in enthalpy. */
tcur = *t;
if (tcur < tmin || tcur > tmax) {
tcur = tmin + (tmax - tmin) / (hmax - hmin) * (hin - hmin);
}
for (iter = 0; iter < maxiter; ++iter) {
CKHBMS(&tcur, y, iwrk, rwrk, &hcur);
CKCPBS(&tcur, y, iwrk, rwrk, &cp);
step = (hin - hcur) / cp;
/* Clamp the Newton step to +/-100 K to keep the iteration stable. */
if (step > 100.) { step = 100.; }
else if (step < -100.) { step = -100.; }
else if (fabs(step) < tol) break;
else if (tcur + step == tcur) break; /* step below machine precision */
tcur += step;
}
*t = tcur;
*ierr = 0;
}
/*compute the critical parameters for each species */
/* Critical temperature Tci, EOS attraction ai, co-volume bi, and acentric
   factor per species. O2 and N2 use tabulated (NIST) critical data; the
   0.42748/0.08664 constants match the Soave-Redlich-Kwong cubic EOS form.
   NC7H16 is estimated from its Lennard-Jones well depth (EPS) and
   collision diameter (SIG) taken from the transport database. CGS units. */
void GET_CRITPARAMS(double * restrict Tci, double * restrict ai, double * restrict bi, double * restrict acentric_i)
{
double EPS[3];
double SIG[3];
double wt[3];
const double avogadro = 6.02214199e23;
const double boltzmann = 1.3806503e-16; /* erg/K: we work in CGS */
const double Rcst = 83.144598; /* in bar [CGS] ! */
egtransetEPS(EPS);
egtransetSIG(SIG);
molecularWeight(wt);
/*species 0: NC7H16 -- Lennard-Jones estimate (1e-8 converts SIG from Angstrom to cm) */
Tci[0] = 1.316 * EPS[0];
ai[0] = (5.55 * pow(avogadro, 2.0) * EPS[0] * boltzmann * pow(1e-8 * SIG[0], 3.0)) / (pow(wt[0], 2.0));
bi[0] = 0.855 * avogadro * pow(1e-8 * SIG[0], 3.0) / (wt[0]);
acentric_i[0] = 0.0;
/*species 1: O2 -- imported from NIST */
Tci[1] = 154.581000;
ai[1] = 1e6 * 0.42748 * pow(Rcst, 2.0) * pow(Tci[1], 2.0) / (pow(31.998800, 2.0) * 50.430466);
bi[1] = 0.08664 * Rcst * Tci[1] / (31.998800 * 50.430466);
acentric_i[1] = 0.022200;
/*species 2: N2 -- imported from NIST */
Tci[2] = 126.192000;
ai[2] = 1e6 * 0.42748 * pow(Rcst, 2.0) * pow(Tci[2], 2.0) / (pow(28.013400, 2.0) * 33.958000);
bi[2] = 0.08664 * Rcst * Tci[2] / (28.013400 * 33.958000);
acentric_i[2] = 0.037200;
}
/* End of file */
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetLENIMC EGTRANSETLENIMC
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetLENIMC egtransetlenimc
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetLENIMC egtransetlenimc_
#endif
/* Report LENIMC = 12, the integer transport work-array length
   (transport library convention). */
void egtransetLENIMC(int* LENIMC)
{
*LENIMC = 12;
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetLENRMC EGTRANSETLENRMC
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetLENRMC egtransetlenrmc
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetLENRMC egtransetlenrmc_
#endif
/* Report LENRMC = 252, the real (double) transport work-array length
   (transport library convention). */
void egtransetLENRMC(int* LENRMC)
{
*LENRMC = 252;
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetNO EGTRANSETNO
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetNO egtransetno
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetNO egtransetno_
#endif
/* Report NO = 4, the number of coefficients per transport polynomial fit
   (matches the 4-coefficient COFLAM/COFETA/COFD tables). */
void egtransetNO(int* NO)
{
*NO = 4;
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetKK EGTRANSETKK
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetKK egtransetkk
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetKK egtransetkk_
#endif
/* Report KK = 3, the number of species in this mechanism
   (NC7H16, O2, N2). */
void egtransetKK(int* KK)
{
*KK = 3;
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetNLITE EGTRANSETNLITE
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetNLITE egtransetnlite
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetNLITE egtransetnlite_
#endif
/* Report NLITE = 0: no "lite" (light, e.g. H/H2/He) species in this
   mechanism per the transport library convention. */
void egtransetNLITE(int* NLITE)
{
*NLITE = 0;
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetPATM EGTRANSETPATM
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetPATM egtransetpatm
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetPATM egtransetpatm_
#endif
/* Report standard atmospheric pressure, 1.01325e6 dyn/cm^2 (CGS). */
void egtransetPATM(double* PATM)
{
*PATM = 0.1013250000000000E+07;
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetWT EGTRANSETWT
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetWT egtransetwt
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetWT egtransetwt_
#endif
/* Species molecular weights (g/mol), order: NC7H16, O2, N2. */
void egtransetWT(double* WT)
{
static const double wt[3] = {
0.1002055721282959E+03, /* NC7H16 */
0.3199880027770996E+02, /* O2 */
0.2801339912414551E+02 /* N2 */
};
for (int k = 0; k < 3; ++k) {
WT[k] = wt[k];
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetEPS EGTRANSETEPS
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetEPS egtranseteps
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetEPS egtranseteps_
#endif
/* Lennard-Jones well depths eps/kB (K), order: NC7H16, O2, N2. */
void egtransetEPS(double* EPS)
{
static const double eps[3] = {
0.4596000000000000E+03, /* NC7H16 */
0.1074000000000000E+03, /* O2 */
0.9753000000000000E+02 /* N2 */
};
for (int k = 0; k < 3; ++k) {
EPS[k] = eps[k];
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetSIG EGTRANSETSIG
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetSIG egtransetsig
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetSIG egtransetsig_
#endif
/* Lennard-Jones collision diameters (Angstrom), order: NC7H16, O2, N2. */
void egtransetSIG(double* SIG)
{
static const double sig[3] = {
0.6253000000000000E+01, /* NC7H16 */
0.3458000000000000E+01, /* O2 */
0.3621000000000000E+01 /* N2 */
};
for (int k = 0; k < 3; ++k) {
SIG[k] = sig[k];
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetDIP EGTRANSETDIP
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetDIP egtransetdip
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetDIP egtransetdip_
#endif
/* Dipole moments (Debye): all three species are nonpolar, so zero. */
void egtransetDIP(double* DIP)
{
for (int k = 0; k < 3; ++k) {
DIP[k] = 0.0;
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetPOL EGTRANSETPOL
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetPOL egtransetpol
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetPOL egtransetpol_
#endif
/* Polarizabilities (Angstrom^3), order: NC7H16, O2, N2. */
void egtransetPOL(double* POL)
{
static const double pol[3] = {
0.0000000000000000E+00, /* NC7H16 */
0.1600000000000000E+01, /* O2 */
0.1760000000000000E+01 /* N2 */
};
for (int k = 0; k < 3; ++k) {
POL[k] = pol[k];
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetZROT EGTRANSETZROT
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetZROT egtransetzrot
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetZROT egtransetzrot_
#endif
/* Rotational relaxation collision numbers at 298 K, order: NC7H16, O2, N2. */
void egtransetZROT(double* ZROT)
{
static const double zrot[3] = {
0.1000000000000000E+01, /* NC7H16 */
0.3800000000000000E+01, /* O2 */
0.4000000000000000E+01 /* N2 */
};
for (int k = 0; k < 3; ++k) {
ZROT[k] = zrot[k];
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetNLIN EGTRANSETNLIN
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetNLIN egtransetnlin
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetNLIN egtransetnlin_
#endif
/* Molecule geometry codes (0 = atom, 1 = linear, 2 = nonlinear):
   NC7H16 nonlinear, O2 and N2 linear. */
void egtransetNLIN(int* NLIN)
{
static const int nlin[3] = { 2, 1, 1 };
for (int k = 0; k < 3; ++k) {
NLIN[k] = nlin[k];
}
}
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFLAM EGTRANSETCOFLAM
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFLAM egtransetcoflam
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFLAM egtransetcoflam_
#endif
/* Thermal-conductivity (lambda) polynomial fit coefficients:
   4 coefficients per species, species order NC7H16, O2, N2
   (transport library naming; presumably a cubic fit in ln(T) —
   confirm against the evaluator). */
void egtransetCOFLAM(double* COFLAM) {
COFLAM[ 0] = -0.2374955066871987E+02;
COFLAM[ 1] = 0.9849452684046382E+01;
COFLAM[ 2] = -0.9670910898851568E+00;
COFLAM[ 3] = 0.3340196970412866E-01;
COFLAM[ 4] = -0.2128535787521960E+01;
COFLAM[ 5] = 0.2989596575804116E+01;
COFLAM[ 6] = -0.2874009536723909E+00;
COFLAM[ 7] = 0.1240808902994658E-01;
COFLAM[ 8] = 0.7598771527433207E+01;
COFLAM[ 9] = -0.1179708291725760E+01;
COFLAM[ 10] = 0.3029588826051161E+00;
COFLAM[ 11] = -0.1538941570998816E-01;
};
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFETA EGTRANSETCOFETA
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFETA egtransetcofeta
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFETA egtransetcofeta_
#endif
/* Viscosity (eta) polynomial fit coefficients: 4 coefficients per
   species, species order NC7H16, O2, N2 (transport library naming). */
void egtransetCOFETA(double* COFETA) {
COFETA[ 0] = -0.2432749422887456E+02;
COFETA[ 1] = 0.4512237539250725E+01;
COFETA[ 2] = -0.4358449793033571E+00;
COFETA[ 3] = 0.1631778523682478E-01;
COFETA[ 4] = -0.1602272818500447E+02;
COFETA[ 5] = 0.2173986581363982E+01;
COFETA[ 6] = -0.1980867737685871E+00;
COFETA[ 7] = 0.8538618302987773E-02;
COFETA[ 8] = -0.1554301373538047E+02;
COFETA[ 9] = 0.1934050217437019E+01;
COFETA[ 10] = -0.1673658048743626E+00;
COFETA[ 11] = 0.7228254732832448E-02;
};
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFD EGTRANSETCOFD
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFD egtransetcofd
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFD egtransetcofd_
#endif
/* Binary diffusion coefficient fit table: 4 coefficients for each ordered
   species pair (i,j) of the 3x3 matrix, laid out as COFD[4*(3*i+j)+c].
   The matrix is symmetric: e.g. entries 4-7 (pair 0,1) equal
   entries 12-15 (pair 1,0). */
void egtransetCOFD(double* COFD) {
COFD[ 0] = -0.2264598829478694E+02;
COFD[ 1] = 0.4885932125928444E+01;
COFD[ 2] = -0.3561378207389411E+00;
COFD[ 3] = 0.1302854521257673E-01;
COFD[ 4] = -0.2053512726280661E+02;
COFD[ 5] = 0.4811654463130417E+01;
COFD[ 6] = -0.3851799949442143E+00;
COFD[ 7] = 0.1567263580162197E-01;
COFD[ 8] = -0.2039977939342515E+02;
COFD[ 9] = 0.4790114386471026E+01;
COFD[ 10] = -0.3847720267012513E+00;
COFD[ 11] = 0.1574463250930258E-01;
COFD[ 12] = -0.2053512726280661E+02;
COFD[ 13] = 0.4811654463130417E+01;
COFD[ 14] = -0.3851799949442143E+00;
COFD[ 15] = 0.1567263580162197E-01;
COFD[ 16] = -0.1506605758131460E+02;
COFD[ 17] = 0.3251697055272910E+01;
COFD[ 18] = -0.2050406569622160E+00;
COFD[ 19] = 0.8763175375306562E-02;
COFD[ 20] = -0.1475676271878258E+02;
COFD[ 21] = 0.3131581686069595E+01;
COFD[ 22] = -0.1897184400551578E+00;
COFD[ 23] = 0.8111551924229800E-02;
COFD[ 24] = -0.2039977939342515E+02;
COFD[ 25] = 0.4790114386471026E+01;
COFD[ 26] = -0.3847720267012513E+00;
COFD[ 27] = 0.1574463250930258E-01;
COFD[ 28] = -0.1475676271878258E+02;
COFD[ 29] = 0.3131581686069595E+01;
COFD[ 30] = -0.1897184400551578E+00;
COFD[ 31] = 0.8111551924229800E-02;
COFD[ 32] = -0.1453137348852218E+02;
COFD[ 33] = 0.3046804301681465E+01;
COFD[ 34] = -0.1793667137201395E+00;
COFD[ 35] = 0.7690228126606375E-02;
};
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetKTDIF EGTRANSETKTDIF
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetKTDIF egtransetktdif
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetKTDIF egtransetktdif_
#endif
/* Thermal-diffusion species indices: intentionally empty — NLITE is 0,
   so no species participate in thermal diffusion in this mechanism. */
void egtransetKTDIF(int* KTDIF) {
};
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFTD EGTRANSETCOFTD
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFTD egtransetcoftd
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFTD egtransetcoftd_
#endif
/* Thermal-diffusion ratio fit coefficients: intentionally empty —
   no thermal-diffusion species in this mechanism (see egtransetKTDIF). */
void egtransetCOFTD(double* COFTD) {
};
#if 0
\\
\\
\\ This is the mechanism file
\\
\\
!*******************************
! Dummy "mechanism" to represent
! a simple 3species system for
! n-heptane non-reacting spray
!*******************************
ELEMENTS
C H O N
END
SPECIES
NC7H16 O2 N2
END
REACTIONS
END
\\
\\
\\ This is the therm file
\\
\\
THERMO ALL
300.0 1000.0 5000.0
NC7H16 7/19/ 0 THERMC 7H 16 0 0G 300.000 5000.000 1391.000 61
2.22148969E+01 3.47675750E-02-1.18407129E-05 1.83298478E-09-1.06130266E-13 2
-3.42760081E+04-9.23040196E+01-1.26836187E+00 8.54355820E-02-5.25346786E-05 3
1.62945721E-08-2.02394925E-12-2.56586565E+04 3.53732912E+01 4
H 120186H 1 G 0300.00 5000.00 1000.00 1
2.50000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 2
2.54716270E+04-4.60117638E-01 2.50000000E+00 0.00000000E+00 0.00000000E+00 3
0.00000000E+00 0.00000000E+00 2.54716270E+04-4.60117608E-01 4
O 120186O 1 G 0300.00 5000.00 1000.00 1
2.54205966E+00-2.75506191E-05-3.10280335E-09 4.55106742E-12-4.36805150E-16 2
2.92308027E+04 4.92030811E+00 2.94642878E+00-1.63816649E-03 2.42103170E-06 3
-1.60284319E-09 3.89069636E-13 2.91476445E+04 2.96399498E+00 4
OH S 9/01O 1H 1 0 0G 200.000 6000.000 1000. 1
2.86472886E+00 1.05650448E-03-2.59082758E-07 3.05218674E-11-1.33195876E-15 2
3.68362875E+03 5.70164073E+00 4.12530561E+00-3.22544939E-03 6.52764691E-06 3
-5.79853643E-09 2.06237379E-12 3.34630913E+03-6.90432960E-01 4.51532273E+03 4
H2 121286H 2 G 0300.00 5000.00 1000.00 1
2.99142337E+00 7.00064411E-04-5.63382869E-08-9.23157818E-12 1.58275179E-15 2
-8.35033997E+02-1.35511017E+00 3.29812431E+00 8.24944174E-04-8.14301529E-07 3
-9.47543433E-11 4.13487224E-13-1.01252087E+03-3.29409409E+00 4
O2 121386O 2 G 0300.00 5000.00 1000.00 1
3.69757819E+00 6.13519689E-04-1.25884199E-07 1.77528148E-11-1.13643531E-15 2
-1.23393018E+03 3.18916559E+00 3.21293640E+00 1.12748635E-03-5.75615047E-07 3
1.31387723E-09-8.76855392E-13-1.00524902E+03 6.03473759E+00 4
H2O 20387H 2O 1 G 0300.00 5000.00 1000.00 1
2.67214561E+00 3.05629289E-03-8.73026011E-07 1.20099639E-10-6.39161787E-15 2
-2.98992090E+04 6.86281681E+00 3.38684249E+00 3.47498246E-03-6.35469633E-06 3
6.96858127E-09-2.50658847E-12-3.02081133E+04 2.59023285E+00 4
HO2 L 5/89H 1O 2 00 00G 200.000 3500.000 1000.000 1
4.01721090E+00 2.23982013E-03-6.33658150E-07 1.14246370E-10-1.07908535E-14 2
1.11856713E+02 3.78510215E+00 4.30179801E+00-4.74912051E-03 2.11582891E-05 3
-2.42763894E-08 9.29225124E-12 2.94808040E+02 3.71666245E+00 1.00021620E+04 4
H2O2 120186H 2O 2 G 0300.00 5000.00 1000.00 1
4.57316685E+00 4.33613639E-03-1.47468882E-06 2.34890357E-10-1.43165356E-14 2
-1.80069609E+04 5.01136959E-01 3.38875365E+00 6.56922581E-03-1.48501258E-07 3
-4.62580552E-09 2.47151475E-12-1.76631465E+04 6.78536320E+00 4
N2 121286N 2 G 0300.00 5000.00 1000.00 1
0.02926640E+02 0.01487977E-01-0.05684761E-05 0.01009704E-08-0.06753351E-13 2
-0.09227977E+04 0.05980528E+02 0.03298677E+02 0.01408240E-01-0.03963222E-04 3
0.05641515E-07-0.02444855E-10-0.01020900E+05 0.03950372E+02 4
END
\\
\\
\\ This is the tran file
\\
\\
AR 0 136.500 3.330 0.000 0.000 0.000
C 0 71.400 3.298 0.000 0.000 0.000 ! *
CH 1 80.000 2.750 0.000 0.000 0.000
CH2 1 144.000 3.800 0.000 0.000 0.000
CH2* 1 144.000 3.800 0.000 0.000 0.000
CH3 1 144.000 3.800 0.000 0.000 0.000
CH4 2 141.400 3.746 0.000 2.600 13.000
CO 1 98.100 3.650 0.000 1.950 1.800
CO2 1 244.000 3.763 0.000 2.650 2.100
HCO 2 498.000 3.590 0.000 0.000 0.000
CH2O 2 498.000 3.590 0.000 0.000 2.000
CH2OH 2 417.000 3.690 1.700 0.000 2.000
CH3O 2 417.000 3.690 1.700 0.000 2.000
CH3OH 2 481.800 3.626 0.000 0.000 1.000 ! SVE
C2 1 97.530 3.621 0.000 1.760 4.000
C2O 1 232.400 3.828 0.000 0.000 1.000 ! *
C2H 1 209.000 4.100 0.000 0.000 2.500
C2H2 1 209.000 4.100 0.000 0.000 2.500
H2CC 2 209.000 4.100 0.000 0.000 2.500
C2H3 2 209.000 4.100 0.000 0.000 1.000 ! *
C2H4 2 280.800 3.971 0.000 0.000 1.500
C2H5 2 252.300 4.302 0.000 0.000 1.500
C2H6 2 252.300 4.302 0.000 0.000 1.500
HCCO 2 150.000 2.500 0.000 0.000 1.000 ! *
HCCOH 2 436.000 3.970 0.000 0.000 2.000
CH2CO 2 436.000 3.970 0.000 0.000 2.000
CH2CHO 2 436.000 3.970 0.000 0.000 2.000
C2H2OH 2 224.700 4.162 0.000 0.000 1.000 ! *
C3H2 2 209.000 4.100 0.000 0.000 1.000 ! *
C3H3 2 252.000 4.760 0.000 0.000 1.000 ! JAM
aC3H4 1 252.000 4.760 0.000 0.000 1.000
pC3H4 1 252.000 4.760 0.000 0.000 1.000
cC3H4 1 252.000 4.760 0.000 0.000 1.000
CH2OCH2 1 252.000 4.760 0.000 0.000 1.000
CH2OCH 1 252.000 4.760 0.000 0.000 1.000
CH3CH2CHO 1 252.000 4.760 0.000 0.000 1.000
C4H 1 357.000 5.180 0.000 0.000 1.000
C4H2 1 357.000 5.180 0.000 0.000 1.000
H2C4O 2 357.000 5.180 0.000 0.000 1.000 ! JAM
C4H2OH 2 224.700 4.162 0.000 0.000 1.000 ! *
iC4H3 2 357.000 5.180 0.000 0.000 1.000 ! JAM
nC4H3 2 357.000 5.180 0.000 0.000 1.000 ! JAM
C4H4 2 357.000 5.180 0.000 0.000 1.000 ! JAM
iC4H5 2 357.000 5.180 0.000 0.000 1.000 ! JAM
nC4H5 2 357.000 5.180 0.000 0.000 1.000 ! JAM
C4H5-2 2 357.000 5.180 0.000 0.000 1.000 !
C4H6 2 357.000 5.180 0.000 0.000 1.000
C4H6-2 2 357.000 5.180 0.000 0.000 1.000
C4H612 2 357.000 5.180 0.000 0.000 1.000
CH3CHOCH2 2 357.000 5.180 0.000 0.000 1.000
C5H2 1 357.000 5.180 0.000 0.000 1.000
C5H3 1 357.000 5.180 0.000 0.000 1.000
C5H5 1 357.000 5.180 0.000 0.000 1.000
C5H6 1 357.000 5.180 0.000 0.000 1.000
lC5H7 1 357.000 5.180 0.000 0.000 1.000
C4H6O25 1 357.000 5.180 0.000 0.000 1.000
C4H6O23 1 357.000 5.180 0.000 0.000 1.000
C4H4O 1 357.000 5.180 0.000 0.000 1.000
CH2CHCO 1 357.000 5.180 0.000 0.000 1.000
CH3CHOCH2 1 357.000 5.180 0.000 0.000 1.000
CH2CHCHCHO 1 357.000 5.180 0.000 0.000 1.000
CH3CHCHCO 1 357.000 5.180 0.000 0.000 1.000
C2H3CHOCH2 1 357.000 5.180 0.000 0.000 1.000
CH3CHCHCHO 1 357.000 5.180 0.000 0.000 1.000
C6H 1 357.000 5.180 0.000 0.000 1.000
C6H2 1 357.000 5.180 0.000 0.000 1.000
C6H3 2 357.000 5.180 0.000 0.000 1.000 !
l-C6H4 2 412.300 5.349 0.000 0.000 1.000 !(JAM)
nC6H5 2 412.300 5.349 0.000 0.000 1.000 !(JAM)
i-C6H5 2 412.300 5.349 0.000 0.000 1.000 !(JAM)
l-C6H6 2 412.300 5.349 0.000 0.000 1.000 !(SVE)
n-C6H7 2 412.300 5.349 0.000 0.000 1.000 !(JAM)
i-C6H7 2 412.300 5.349 0.000 0.000 1.000 !(JAM)
C6H8 2 412.300 5.349 0.000 0.000 1.000 !(JAM)
NC7H16 2 459.600 6.253 0.000 0.000 1.000 !TCPC
HE 0 10.200 2.576 0.000 0.000 0.000 ! *
H 0 145.000 2.050 0.000 0.000 0.000
H2 1 38.000 2.920 0.000 0.790 280.000
H2O 2 572.400 2.605 1.844 0.000 4.000
H2O2 2 107.400 3.458 0.000 0.000 3.800
HO2 2 107.400 3.458 0.000 0.000 1.000 ! *
N2 1 97.530 3.621 0.000 1.760 4.000
O 0 80.000 2.750 0.000 0.000 0.000
O2 1 107.400 3.458 0.000 1.600 3.800
OH 1 80.000 2.750 0.000 0.000 0.000
The Lennard-Jones parameters of polycyclic aromatic hydrocarbons were estimated
based on the critical temperature and pressure. See H. Wang and M. Frenklach,
"Transport Properties of Polycyclic Aromatic Hydrocarbons for Flame Modeling."
Combustion and Flame, 96:163-170 (1994)
c-C6H4 2 464.8 5.29 0.00 10.32 0.000 ! benze
C6H6 2 464.8 5.29 0.00 10.32 0.000 ! benze
C6H5 2 464.8 5.29 0.00 10.32 0.000 ! benze
C6H5CH3 2 495.3 5.68 0.43 12.30 1.000 !
C6H5C2H3 2 546.2 6.00 0.13 15.00 1.000 !
C6H5CH2 2 495.3 5.68 0.43 12.30 1.000 !
C6H5C2H 2 535.6 5.72 0.77 12.00 1.000 !
A2 2 630.4 6.18 0.00 16.50 1.000 !
c-C6H7 2 464.8 5.29 0.00 10.32 0.000 ! benze
C5H4O 2 464.8 5.29 0.00 10.32 0.000 ! benze
C5H5O 2 464.8 5.29 0.00 10.32 0.000 ! benze
C5H4OH 2 464.8 5.29 0.00 10.32 0.000 ! benze
C6H5O 2 464.8 5.29 0.00 10.32 0.000 ! benze
C6H5OH 2 464.8 5.29 0.00 10.32 0.000 ! benze
aC3H5 2 266.800 4.982 0.000 0.000 1.000
CH3CCH2 2 266.800 4.982 0.000 0.000 1.000
CH3CHCH 2 266.800 4.982 0.000 0.000 1.000
C3H6 2 266.800 4.982 0.000 0.000 1.000
C3H7 2 266.800 4.982 0.000 0.000 1.000
C4H6 2 357.000 5.180 0.000 0.000 1.000
iC3H7 2 266.800 4.982 0.000 0.000 1.000
nC3H7 2 266.800 4.982 0.000 0.000 1.000
C3H8 2 266.800 4.982 0.000 0.000 1.000
C4H 1 357.000 5.180 0.000 0.000 1.000
C4H2 1 357.000 5.180 0.000 0.000 1.000
C4H2OH 2 224.700 4.162 0.000 0.000 1.000 ! *
iC4H5 2 357.000 5.176 0.000 0.000 1.000
C4H6 2 357.000 5.176 0.000 0.000 1.000
C4H7 2 357.000 5.176 0.000 0.000 1.000
iC4H7 2 357.000 5.176 0.000 0.000 1.000
C4H81 2 357.000 5.176 0.000 0.000 1.000
C4H82 2 357.000 5.176 0.000 0.000 1.000
iC4H8 2 357.000 5.176 0.000 0.000 1.000
tC4H9 2 357.000 5.176 0.000 0.000 1.000
iC4H9 2 357.000 5.176 0.000 0.000 1.000
pC4H9 2 357.000 5.176 0.000 0.000 1.000
sC4H9 2 357.000 5.176 0.000 0.000 1.000
C4H10 2 357.000 5.176 0.000 0.000 1.000
iC4H10 2 357.000 5.176 0.000 0.000 1.000
CH3COCH3 2 357.000 5.176 0.000 0.000 1.000
C2H3CHO 2 357.000 5.176 0.000 0.000 1.000
iC4H7O 2 450.000 5.500 0.000 0.000 1.000 ! JAM
CH3CHO 2 436.000 3.970 0.000 0.000 2.000
CH3CO 2 436.000 3.970 0.000 0.000 2.000
C5H5O(2,4) 2 494 5.2 1.6 0.0 1.0
C5H5O(1,2) 2 494 5.2 1.6 0.0 1.0
C5H5O(1,3) 2 494 5.2 1.6 0.0 1.0
C4H5 2 329 5.1 0.0 0.0 1.0
c-C4H5 2 329 5.1 0.0 0.0 1.0
C6H5CO 2 593 5.5 2.8 0.0 1.0
C6H5CHO 2 593 5.47 2.8 0.0 1.0
C6H5C2H5 2 485 5.425 0.4 0.0 1.0
C6H4O2 2 485 5.425 0.4 0.0 1.0
HOC6H4CH3 2 567 5.60 1.6 0.0 1.0
C6H5CH2OH 2 572 5.82 1.7 0.0 1.0
bi-C6H5CH2 2 620 7.24 0.0 0.0 1.0
C5H5OH 2 464.800 5.290 0.000 10.320 0.000 ! as C5H4OH, ZD99
C5H4OH 2 464.800 5.290 0.000 10.320 0.000 ! benze
o-C6H4 2 464.8 5.29 0.00 10.32 0.000 ! benze
C6H5C6H5 2 676.5 6.31 0.00 20.00 1.000 ! biphe
OC6H4CH3 2 567 5.6 1.6 0.0 1.000
C10H8 2 630.4 6.18 0.00 16.50 1.000 ! naphthalene
C6H4CH3 2 495.3 5.68 0.43 12.30 1.000 !
1-15: Species name
16-80: Molecular parameters
molecule index: 0 = atom, 1= linear molec.
2 = nonlinear molec.
L-J potential well depth, eps/kB (K)
L-J collision diameter, sigma (Angstrom)
Dipole moment, mu (Debye)
Polarizability, alpha (Angstrom^3)
Rotational relaxation number, Zrot at 298 K
Comments
#endif
|
GxB_BinaryOp_ytype.c | //------------------------------------------------------------------------------
// GxB_BinaryOp_ytype: return the type of y for z=f(x,y)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// NOTE: this function is historical. Use GxB_BinaryOp_ytype_name instead.
#include "GB.h"
GrB_Info GxB_BinaryOp_ytype // type of y
(
GrB_Type *ytype, // return type of input y
GrB_BinaryOp binaryop // binary operator to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// Both pointers must be non-NULL and the op must be a valid, initialized
// binary operator; the GB_RETURN_* macros return an error code otherwise.
GB_WHERE1 ("GxB_BinaryOp_ytype (&ytype, binaryop)") ;
GB_RETURN_IF_NULL (ytype) ;
GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ;
ASSERT_BINARYOP_OK (binaryop, "binaryop for ytype", GB0) ;
//--------------------------------------------------------------------------
// return the ytype
//--------------------------------------------------------------------------
(*ytype) = binaryop->ytype ;
// flush so the store to *ytype is visible to other threads before return
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
convolution_channel.c | /*
* Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CSI-NN2 version 1.12.x */
#include "csi_ref.h"
/* Dequantize a single uint8 value using a per-channel scale and zero point:
 * real = (q - zero_point) * scale. */
static float csi_ref_uint8_to_float_channel(uint8_t i, float scale, int32_t zero_point)
{
    float centered = (float)i - zero_point;
    return centered * scale;
}
/* Dequantize a single int8 value using a per-channel scale and zero point:
 * real = (q - zero_point) * scale. */
static float csi_ref_int8_to_float_channel(int8_t i, float scale, int32_t zero_point)
{
    float centered = (float)i - zero_point;
    return centered * scale;
}
static int channel_kernel_to_common(struct csi_tensor *float_kernel, struct csi_tensor *o_kernel,
struct conv2d_params *params)
{
float *float_kernel_data = float_kernel->data;
int kernel_size = csi_tensor_size(o_kernel);
for (int i = 0; i < o_kernel->dim[0]; i++) {
int per_cahnnel = kernel_size / o_kernel->dim[0];
for (int j = 0; j < per_cahnnel; j++) {
int index = i * per_cahnnel + j;
if (o_kernel->dtype == CSINN_DTYPE_UINT8) {
uint8_t *kernel_data = o_kernel->data;
float_kernel_data[index] = csi_ref_uint8_to_float_channel(kernel_data[index],
o_kernel->qinfo[i].scale, o_kernel->qinfo[i].zero_point);
} else if (o_kernel->dtype == CSINN_DTYPE_INT8) {
int8_t *kernel_data = o_kernel->data;
float_kernel_data[index] = csi_ref_int8_to_float_channel(kernel_data[index],
o_kernel->qinfo[i].scale, o_kernel->qinfo[i].zero_point);
} else {
return CSINN_FALSE;
}
}
}
}
/* Convert a per-channel-quantized int32 bias tensor to float.
 * The bias shares the accumulator scale of the convolution:
 * real_bias[i] = q_bias[i] * kernel_scale[i] * input_scale. */
static void channel_bias_to_common(struct csi_tensor *float_bias, struct csi_tensor *bias,
                                   struct csi_tensor *input, struct csi_tensor *kernel)
{
    const int32_t *src = bias->data;
    float *dst = float_bias->data;
    int count = csi_tensor_size(bias);
    for (int i = 0; i < count; i++) {
        dst[i] = src[i] * kernel->qinfo[i].scale * input->qinfo->scale;
    }
}
/* Reference NCHW conv2d for per-channel quantized tensors: dequantize
 * input/kernel/bias to float, run the float reference convolution, then
 * requantize the result into o_output. Always returns CSINN_TRUE. */
static int csi_ref_conv2d_channel_nchw_quant(struct csi_tensor *o_input,
                                             struct csi_tensor *o_output,
                                             struct csi_tensor *o_kernel,
                                             struct csi_tensor *o_bias,
                                             struct conv2d_params *params)
{
    /* temporary float tensors; freed together below */
    struct csi_tensor *float_input = csi_ref_convert_float_tensor(o_input);
    struct csi_tensor *float_kernel = csi_ref_alloc_float_tensor(o_kernel);
    struct csi_tensor *float_bias = csi_ref_alloc_float_tensor(o_bias);
    struct csi_tensor *float_output = csi_ref_alloc_float_tensor(o_output);
    /* NOTE(review): return value ignored — an unsupported kernel dtype
     * (CSINN_FALSE) would silently convolve garbage; consider checking. */
    channel_kernel_to_common(float_kernel, o_kernel, params);
    channel_bias_to_common(float_bias, o_bias, o_input, o_kernel);
    csi_ref_conv2d_f32(float_input, float_output, float_kernel, float_bias, params);
    /* requantize float result into the caller's output tensor */
    csi_tensor_data_convert(o_output, float_output);
    csi_ref_conv_free_float_tensor(float_input, float_output, float_kernel, float_bias);
    return CSINN_TRUE;
}
/* Depthwise 2-D convolution, uint8 tensors, per-output-channel kernel
 * quantization, NCHW layout.  Input/kernel/output are transposed to NHWC
 * temporaries, the convolution runs in NHWC, and the result is transposed
 * back into o_output.  Returns CSINN_TRUE. */
static int csi_ref_depthwise_conv2d_channel_nchw_u8(struct csi_tensor *o_input,
                                                    struct csi_tensor *o_output,
                                                    struct csi_tensor *o_kernel,
                                                    struct csi_tensor *o_bias,
                                                    struct conv2d_params *params)
{
    struct csi_tensor* input;
    struct csi_tensor* output;
    struct csi_tensor* kernel;
    struct csi_tensor* bias = o_bias;
    /* NHWC working copies (heap-allocated by the transpose helper). */
    input = csi_ref_nchw_to_nhwc_8(o_input);
    kernel = csi_ref_nchw_to_nhwc_8(o_kernel);
    output = csi_ref_nchw_to_nhwc_8(o_output);
    uint8_t *input_data = input->data;
    uint8_t *output_data = output->data;
    uint8_t *kernel_data = kernel->data;
    int32_t *bias_data = bias->data;
    const int32_t dilation_width_factor = params->dilation_width;
    const int32_t dilation_height_factor = params->dilation_height;
    const int32_t batches = input->dim[0];
    const int32_t input_depth = input->dim[3];
    const int32_t output_depth = output->dim[3];
    const int32_t input_height = input->dim[1];
    const int32_t input_width = input->dim[2];
    const int32_t filter_height = kernel->dim[1];
    const int32_t filter_width = kernel->dim[2];
    const int32_t output_height = output->dim[1];
    const int32_t output_width = output->dim[2];
    /* Each input channel produces depth_multiplier output channels. */
    const int32_t depth_multiplier = output_depth / input_depth;
    const int32_t input_offset = input->qinfo->zero_point;
    /* NOTE(review): the next three values are unused in this function --
     * requantization is delegated to csi_ref_quantize_channel_u8 below. */
    const int32_t output_offset = output->qinfo->zero_point;
    const int32_t output_multiplier = output->qinfo->multiplier;
    const int32_t output_shift = output->qinfo->shift;
    for (int32_t b = 0; b < batches; ++b) {
        /* Output rows are independent, so parallelize over out_y. */
        #pragma omp parallel for num_threads(8)
        for (int32_t out_y = 0; out_y < output_height; ++out_y) {
            for (int32_t out_x = 0; out_x < output_width; ++out_x) {
                for (int32_t ic = 0; ic < input_depth; ++ic) {
                    for (int32_t m = 0; m < depth_multiplier; m++) {
                        const int32_t oc = m + ic * depth_multiplier;
                        const int32_t in_x_origin = (out_x * params->stride_width) - params->pad_left;
                        const int32_t in_y_origin = (out_y * params->stride_height) - params->pad_top;
                        /* 64-bit accumulator avoids overflow of the 32-bit products. */
                        int64_t acc = 0;
                        for (int32_t filter_y = 0; filter_y < filter_height; ++filter_y) {
                            for (int32_t filter_x = 0; filter_x < filter_width; ++filter_x) {
                                const int32_t in_x = in_x_origin + dilation_width_factor * filter_x;
                                const int32_t in_y =
                                    in_y_origin + dilation_height_factor * filter_y;
                                // If the location is outside the bounds of the input image,
                                // use zero as a default value.
                                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                                    (in_y < input_height)) {
                                    int32_t input_val =
                                        input_data[csi_ref_get_index(input->dim, b, in_y, in_x, ic)];
                                    int32_t filter_val = kernel_data[csi_ref_get_index(
                                        kernel->dim, ic, filter_y, filter_x, m)];
                                    /* Per-output-channel kernel zero point from the
                                     * original (pre-transpose) kernel's qinfo array. */
                                    acc +=
                                        (filter_val - o_kernel->qinfo[oc].zero_point) * (input_val - input_offset);
                                }
                            }
                        }
                        /* Bias is optional; dim_count == 0 marks "no bias". */
                        if (bias->dim_count != 0) {
                            acc += bias_data[oc];
                        }
                        uint8_t out = csi_ref_quantize_channel_u8(acc, input, output, o_kernel->qinfo[oc].scale);
                        output_data[csi_ref_get_index(output->dim, b, out_y, out_x, oc)] = out;
                    }
                }
            }
        }
    }
    /* Transpose the NHWC result back into o_output.  NOTE(review): input and
     * kernel temporaries are freed below but `output` is not -- presumably
     * csi_ref_nhwc_to_nchw_8 releases its second argument; TODO confirm. */
    csi_ref_nhwc_to_nchw_8(o_output, output);
    csi_mem_free(input->data);
    csi_mem_free(input);
    csi_mem_free(kernel->data);
    csi_mem_free(kernel);
    return CSINN_TRUE;
}
/* Depthwise 2-D convolution, int8 tensors, per-output-channel kernel
 * quantization, NCHW layout.  Mirror of the uint8 variant above: transpose
 * to NHWC, convolve, transpose back into o_output.  Returns CSINN_TRUE. */
static int csi_ref_depthwise_conv2d_channel_nchw_i8(struct csi_tensor *o_input,
                                                    struct csi_tensor *o_output,
                                                    struct csi_tensor *o_kernel,
                                                    struct csi_tensor *o_bias,
                                                    struct conv2d_params *params)
{
    struct csi_tensor* input;
    struct csi_tensor* output;
    struct csi_tensor* kernel;
    struct csi_tensor* bias = o_bias;
    /* NHWC working copies (heap-allocated by the transpose helper). */
    input = csi_ref_nchw_to_nhwc_8(o_input);
    kernel = csi_ref_nchw_to_nhwc_8(o_kernel);
    output = csi_ref_nchw_to_nhwc_8(o_output);
    int8_t *input_data = input->data;
    int8_t *output_data = output->data;
    int8_t *kernel_data = kernel->data;
    int32_t *bias_data = bias->data;
    const int32_t dilation_width_factor = params->dilation_width;
    const int32_t dilation_height_factor = params->dilation_height;
    const int32_t batches = input->dim[0];
    const int32_t input_depth = input->dim[3];
    const int32_t output_depth = output->dim[3];
    const int32_t input_height = input->dim[1];
    const int32_t input_width = input->dim[2];
    const int32_t filter_height = kernel->dim[1];
    const int32_t filter_width = kernel->dim[2];
    const int32_t output_height = output->dim[1];
    const int32_t output_width = output->dim[2];
    /* Each input channel produces depth_multiplier output channels. */
    const int32_t depth_multiplier = output_depth / input_depth;
    const int32_t input_offset = input->qinfo->zero_point;
    /* NOTE(review): the next three values are unused in this function --
     * requantization is delegated to csi_ref_quantize_channel_i8 below. */
    const int32_t output_offset = output->qinfo->zero_point;
    const int32_t output_multiplier = output->qinfo->multiplier;
    const int32_t output_shift = output->qinfo->shift;
    for (int32_t b = 0; b < batches; ++b) {
        /* Output rows are independent, so parallelize over out_y. */
        #pragma omp parallel for num_threads(8)
        for (int32_t out_y = 0; out_y < output_height; ++out_y) {
            for (int32_t out_x = 0; out_x < output_width; ++out_x) {
                for (int32_t ic = 0; ic < input_depth; ++ic) {
                    for (int32_t m = 0; m < depth_multiplier; m++) {
                        const int32_t oc = m + ic * depth_multiplier;
                        const int32_t in_x_origin = (out_x * params->stride_width) - params->pad_left;
                        const int32_t in_y_origin = (out_y * params->stride_height) - params->pad_top;
                        /* 64-bit accumulator avoids overflow of the 32-bit products. */
                        int64_t acc = 0;
                        for (int32_t filter_y = 0; filter_y < filter_height; ++filter_y) {
                            for (int32_t filter_x = 0; filter_x < filter_width; ++filter_x) {
                                const int32_t in_x = in_x_origin + dilation_width_factor * filter_x;
                                const int32_t in_y =
                                    in_y_origin + dilation_height_factor * filter_y;
                                // If the location is outside the bounds of the input image,
                                // use zero as a default value.
                                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                                    (in_y < input_height)) {
                                    int32_t input_val =
                                        input_data[csi_ref_get_index(input->dim, b, in_y, in_x, ic)];
                                    int32_t filter_val = kernel_data[csi_ref_get_index(
                                        kernel->dim, ic, filter_y, filter_x, m)];
                                    /* Per-output-channel kernel zero point from the
                                     * original (pre-transpose) kernel's qinfo array. */
                                    acc +=
                                        (filter_val - o_kernel->qinfo[oc].zero_point) * (input_val - input_offset);
                                }
                            }
                        }
                        /* Bias is optional; dim_count == 0 marks "no bias". */
                        if (bias->dim_count != 0) {
                            acc += bias_data[oc];
                        }
                        int8_t out = csi_ref_quantize_channel_i8(acc, input, output, o_kernel->qinfo[oc].scale);
                        output_data[csi_ref_get_index(output->dim, b, out_y, out_x, oc)] = out;
                    }
                }
            }
        }
    }
    /* Transpose the NHWC result back into o_output.  NOTE(review): input and
     * kernel temporaries are freed below but `output` is not -- presumably
     * csi_ref_nhwc_to_nchw_8 releases its second argument; TODO confirm. */
    csi_ref_nhwc_to_nchw_8(o_output, output);
    csi_mem_free(input->data);
    csi_mem_free(input);
    csi_mem_free(kernel->data);
    csi_mem_free(kernel);
    return CSINN_TRUE;
}
/* Grouped per-channel quantized conv2d (NCHW): slice the channel dimension
 * of input/output, the kernel, the bias and the kernel qinfo array into
 * `group` contiguous pieces and run the per-channel NCHW convolution on each
 * slice with group forced to 1.  Returns CSINN_TRUE. */
static int csi_ref_group_conv2d_channel_nchw_quant(struct csi_tensor *o_input,
                                                   struct csi_tensor *o_output,
                                                   struct csi_tensor *o_kernel,
                                                   struct csi_tensor *o_bias,
                                                   struct conv2d_params *params)
{
    /* Shallow per-group views over the original tensors' data.
     * NOTE(review): these four wrappers are never released in this function
     * -- looks like a leak of the csi_tensor structs; confirm whether a
     * csi_free_tensor-style call is expected here. */
    struct csi_tensor *input = csi_alloc_tensor(NULL);
    struct csi_tensor *output = csi_alloc_tensor(NULL);
    struct csi_tensor *kernel = csi_alloc_tensor(NULL);
    struct csi_tensor *bias = csi_alloc_tensor(NULL);
    struct conv2d_params pparams;
    csi_tensor_copy(input, o_input);
    csi_tensor_copy(output, o_output);
    csi_tensor_copy(kernel, o_kernel);
    csi_tensor_copy(bias, o_bias);
    memcpy(&pparams, params, sizeof(struct conv2d_params));
    /* Each group sees 1/group of the channels. */
    input->dim[1] /= params->group;
    output->dim[1] /= params->group;
    kernel->dim[0] /= params->group;
    bias->dim[0] /= params->group;
    pparams.group = 1;
    /* Per-group element counts, computed from the already-shrunk dims. */
    int input_size = csi_tensor_size(input);
    int output_size = csi_tensor_size(output);
    int kernel_size = csi_tensor_size(kernel);
    /* int8_t pointer arithmetic steps in bytes; correct for the 1-byte
     * UINT8/INT8 dtypes this path handles. */
    int8_t *input_data = o_input->data;
    int8_t *output_data = o_output->data;
    int8_t *kernel_data = o_kernel->data;
    int32_t *bias_data = o_bias->data;
    for (int i = 0; i < params->group; i++) {
        input->data = input_data + i * input_size;
        output->data = output_data + i * output_size;
        kernel->data = kernel_data + i * kernel_size;
        if (bias->data && bias->dim_count != 0) {
            /* Bias has one int32 per output channel; step by channels/group. */
            bias->data = bias_data + i * o_output->dim[1] / params->group;
        }
        /* Slice the per-output-channel qinfo array the same way. */
        kernel->qinfo = o_kernel->qinfo + i * o_output->dim[1] / params->group;
        csi_ref_conv2d_channel_nchw_quant(input, output, kernel, bias, &pparams);
    }
    return CSINN_TRUE;
}
/* Dispatch per-channel quantized conv2d by layout.
 * Returns the convolution status for NCHW and CSINN_UNSUPPORT_LAYOUT for
 * any other layout.
 * Fix: the NCHW branch previously fell off the end of this non-void
 * function (undefined behavior for any caller reading the result); the
 * callee's return value is now propagated. */
int csi_ref_conv2d_channel_quant(struct csi_tensor *input,
                                 struct csi_tensor *output,
                                 struct csi_tensor *kernel,
                                 struct csi_tensor *bias,
                                 struct conv2d_params *params)
{
    if (params->base.layout == CSINN_LAYOUT_NCHW) {
        return csi_ref_conv2d_channel_nchw_quant(input, output, kernel, bias, params);
    }
    return CSINN_UNSUPPORT_LAYOUT;
}
/* Per-channel quantized conv2d followed by an in-place ReLU on `output`.
 * Returns CSINN_TRUE.
 * Fix: the relu_params allocation was leaked on every call; it is now
 * released with the matching csi_mem_free once the ReLU has run. */
int csi_ref_conv2d_channel_relu_quant(struct csi_tensor *input,
                                      struct csi_tensor *output,
                                      struct csi_tensor *kernel,
                                      struct csi_tensor *bias,
                                      struct conv2d_params *params)
{
    csi_ref_conv2d_channel_quant(input, output, kernel, bias, params);
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    /* ReLU inherits the conv's base parameters (layout, API, etc.). */
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu_init(output, output, rp);
    csi_relu(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}
/* Per-channel quantized conv2d followed by an in-place ReLU6 on `output`.
 * Returns CSINN_TRUE.
 * Fix: the relu_params allocation was leaked on every call; it is now
 * released with the matching csi_mem_free once the ReLU6 has run. */
int csi_ref_conv2d_channel_relu6_quant(struct csi_tensor *input,
                                       struct csi_tensor *output,
                                       struct csi_tensor *kernel,
                                       struct csi_tensor *bias,
                                       struct conv2d_params *params)
{
    csi_ref_conv2d_channel_quant(input, output, kernel, bias, params);
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    /* ReLU6 inherits the conv's base parameters (layout, API, etc.). */
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu6_init(output, output, rp);
    csi_relu6(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}
/* Dispatch per-channel quantized depthwise conv2d by layout and dtype.
 * Returns the convolution status, CSINN_UNSUPPORT_DTYPE for dtypes other
 * than UINT8/INT8, and CSINN_UNSUPPORT_LAYOUT for non-NCHW layouts.
 * Fix: the successful branches previously fell off the end of this
 * non-void function (undefined behavior for callers reading the result);
 * the callee return values are now propagated. */
int csi_ref_depthwise_conv2d_channel_quant(struct csi_tensor *input,
                                           struct csi_tensor *output,
                                           struct csi_tensor *kernel,
                                           struct csi_tensor *bias,
                                           struct conv2d_params *params)
{
    if (params->base.layout != CSINN_LAYOUT_NCHW) {
        return CSINN_UNSUPPORT_LAYOUT;
    }
    if (input->dtype == CSINN_DTYPE_UINT8) {
        return csi_ref_depthwise_conv2d_channel_nchw_u8(input, output, kernel, bias, params);
    }
    if (input->dtype == CSINN_DTYPE_INT8) {
        return csi_ref_depthwise_conv2d_channel_nchw_i8(input, output, kernel, bias, params);
    }
    return CSINN_UNSUPPORT_DTYPE;
}
int csi_ref_depthwise_conv2d_channel_relu_quant(struct csi_tensor *input,
struct csi_tensor *output,
struct csi_tensor *kernel,
struct csi_tensor *bias,
struct conv2d_params *params)
{
csi_ref_depthwise_conv2d_channel_quant(input, output, kernel, bias, params);
struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
csi_relu_init(output, output, rp);
csi_relu(output, output, rp);
}
int csi_ref_depthwise_conv2d_channel_relu6_quant(struct csi_tensor *input,
struct csi_tensor *output,
struct csi_tensor *kernel,
struct csi_tensor *bias,
struct conv2d_params *params)
{
csi_ref_depthwise_conv2d_channel_quant(input, output, kernel, bias, params);
struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
csi_relu6_init(output, output, rp);
csi_relu6(output, output, rp);
}
/* Dispatch per-channel quantized grouped conv2d by layout.
 * Returns the convolution status for NCHW and CSINN_UNSUPPORT_LAYOUT for
 * any other layout.
 * Fix: the NCHW branch previously fell off the end of this non-void
 * function (undefined behavior for callers reading the result); the
 * callee's return value is now propagated. */
int csi_ref_group_conv2d_channel_quant(struct csi_tensor *input,
                                       struct csi_tensor *output,
                                       struct csi_tensor *kernel,
                                       struct csi_tensor *bias,
                                       struct conv2d_params *params)
{
    if (params->base.layout == CSINN_LAYOUT_NCHW) {
        return csi_ref_group_conv2d_channel_nchw_quant(input, output, kernel, bias, params);
    }
    return CSINN_UNSUPPORT_LAYOUT;
}
int csi_ref_group_conv2d_channel_relu_quant(struct csi_tensor *input,
struct csi_tensor *output,
struct csi_tensor *kernel,
struct csi_tensor *bias,
struct conv2d_params *params)
{
csi_ref_group_conv2d_channel_quant(input, output, kernel, bias, params);
struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
csi_relu_init(output, output, rp);
csi_relu(output, output, rp);
}
|
GB_unaryop__abs_int64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_uint8
// op(A') function: GB_tran__abs_int64_uint8
// C type: int64_t
// A type: uint8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IABS ((int64_t) Ax [p]) for p = 0..anz-1 (auto-generated file;
// the GB_* macros above define the cast and the operator).
GrB_Info GB_unop__abs_int64_uint8
(
    int64_t *restrict Cx,        // output array, anz entries, already allocated
    const uint8_t *restrict Ax,  // input array, anz entries
    int64_t anz,                 // number of entries to process
    int nthreads                 // OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_ABS / GxB_NO_INT64 / GxB_NO_UINT8
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expands to: GB_GETA ; GB_CASTING ; GB_OP (see macros above)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs ((int64_t) A') : transpose A, typecast uint8 -> int64, apply GB_IABS.
// The actual work is done by the included template, driven by the GB_* macros.
GrB_Info GB_tran__abs_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,             // per-slice row counts; semantics defined
                                     // by GB_unaryop_transpose.c -- see there
    GBI_single_iterator Iter,        // iterator over A
    const int64_t *restrict A_slice, // slice boundaries, naslice of them
    int naslice                      // number of slices of A
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_ABS / GxB_NO_INT64 / GxB_NO_UINT8
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
target_update_array_extension.c | // --------------------------------------------------
// Check 'to' and extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// --------------------------------------------------
// Check 'from' and extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// --------------------------------------------------
// Check 'to' and extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// --------------------------------------------------
// Check 'from' and extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// END.
#include <stdio.h>
#define BEFORE 0
#define AFTER 1
#if EXTENDS == BEFORE
# define SMALL 2:3
# define LARGE 0:5
#elif EXTENDS == AFTER
# define SMALL 0:3
# define LARGE 0:5
#else
# error EXTENDS undefined
#endif
// Verifies the 'present' motion modifier: updating a subset of a mapped
// array succeeds, while updating a superset of a mapped array must abort.
// SMALL/LARGE and CLAUSE are injected by the RUN lines above; the CHECK
// comments are FileCheck patterns and must not be altered.
int main() {
  int arr[5];
  // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]]
  // Fix: sizeof yields size_t, which must be printed with %zu; %ld was a
  // format-specifier mismatch (undefined behavior on targets where long and
  // size_t differ).  The printed digits are unchanged, so the FileCheck
  // patterns above and below still match.
  fprintf(stderr, "addr=%p, size=%zu\n", arr, sizeof arr);
  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[LARGE])
  {
    // The superset is mapped, so 'present' on the subset succeeds.
#pragma omp target update CLAUSE(present: arr[SMALL])
  }
  // CHECK: arr is present
  fprintf(stderr, "arr is present\n");
  // CHECK: Libomptarget message: device mapping required by 'present' motion modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes)
  // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target data map(alloc: arr[SMALL])
  {
    // Only the subset is mapped, so 'present' on the superset must abort
    // before the final fprintf runs.
#pragma omp target update CLAUSE(present: arr[LARGE])
  }
  // CHECK-NOT: arr is present
  fprintf(stderr, "arr is present\n");
  return 0;
}
|
coll.h | #ifndef G24_LIB_COLL
#define G24_LIB_COLL
/**********************************************
* *
* BIBLIOTECA DE FUNÇÕES MULTI-DIMENSIONAIS *
* *
* Por: Gonçalo Vaz (87321) *
* Nuno Fernandes (87342) *
* [Antigo Grupo 24 de FC 2017-2018] *
* [Antigo Grupo 25 de Programação 2016-2017] *
* *
**********************************************/
/*!
\file coll.h
\brief Defines `g24_lib::coll`, a mostly deprecated class that holds N-dimensional collections of objects.
\author Nuno Fernandes (main coder, current maintainer)
\author Gonçalo Vaz (auxiliary coder and tester)
*/
namespace g24_lib
{
/*!
\brief Holds a run-time defined number of objects in a run-time defined number of dimensions.
\tparam Type The type of the elements of the array.
\tparam indexer The type that should be able to hold the size of the array.
\pre \p indexer must satisfy the constraints placed on the choice of \ref g24_lib::Definitions::default_unsigned_indexer.
\details Suppose we want to store something in `n+1` dimensions,
with `k_0`, `k_1`, ... , `k_n` elements in each dimension.
`g24_lib::coll` will allocate an array of `k_0 * k_1 * ... * k_n` elements.
The elements at each dimension will be stored at decreasing strides,
much in the same way as native multidimensional static arrays
(such as `int x[10][11][12]`).
\details The strides between each element in a given dimension will be:
`k_1 * k_2 * ... * k_n`, `k_2 * ... * k_n`, ... , `k_n` , `1`
(that is, in the last dimension the elements are stored contiguously).
\deprecated After seeing much use during the projects that originated the first versions of this library,
the authors eventually concluded that the run-time defined number of dimensions
was not a significant advantage, quite the opposite, since it forced the usage
of the (also deprecated) `g24_lib::point` class and more dynamic allocations
than desirable. A more reasonable alternative is to use `g24_lib::ndview`
to specify a run-time number of elements in a compile-time number of dimensions
and couple this with any array (not necessarily `g24_lib::simple_array`, though
that would be the most natural choice for this library...) using `g24_lib::ndview::array_size()`
to get the necessary number of elements and access ordered positions through `g24_lib::ndview`.
*/
template <class Type, class indexer = g24_lib::Definitions::default_unsigned_indexer> class coll
{
public:
typedef Type value_type;
typedef indexer indexer_type;
private:
/*!
\brief The actual array.
*/
Type *a;
/*!
\brief The number of dimensions.
*/
indexer dim;
/*!
\brief The total size of the array.
*/
indexer Size;
/*!
\brief The number of elements per dimension.
*/
point<indexer, indexer> nums;
/*!
\brief The stride between the elements in each dimension.
*/
point<indexer, indexer> sep;
public:
/*!
\brief Returns the number of dimensions of the `coll`.
*/
inline indexer dimension() const { return dim; }  //trivial accessor
/*!
\brief Returns the total number of elements of the `coll`.
*/
inline indexer size() const { return Size; }  //trivial accessor
/*!
\brief Returns a `g24_lib::point` that holds the number of elements in each dimension.
*/
inline point<indexer, indexer> numbers() const { return nums; }  //copy of the per-dimension counts
/*!
\brief Returns the number of elements in the dimension \p i.
*/
inline indexer numbers(const indexer i) const { return nums[i]; }  //count in dimension i
/*!
\brief Returns a `g24_lib::point` that holds the stride between each element in each dimension.
*/
inline point<indexer, indexer> separation() const { return sep; }  //copy of the per-dimension strides
/*!
\brief Returns the stride between each element in the dimension \p i.
*/
inline indexer separation(const indexer i) const { return sep[i]; }  //stride in dimension i
/*!
\brief Gives direct access to the underlying array.
\warning This gives actual access to the raw array. It shouldn't absolutely be used
unless the user is really, really sure of what will be done.
\deprecated This is kept for compatibility with older code,
where copying to CUDA and so on wasn't as well abstracted
as it is now. It really, really shouldn't be needed anymore.
*/
inline Type *& get_access_to_array() { return a; }  //raw access; see deprecation note above
/*!
\brief Gives direct access to the underlying array.
\warning This gives actual access to the raw array. It shouldn't absolutely be used
unless the user is really, really sure of what will be done.
\deprecated This is kept for compatibility with older code,
where copying to CUDA and so on wasn't as well abstracted
as it is now. It really, really shouldn't be needed anymore.
*/
inline const Type * get_access_to_array() const { return a; }  //raw access; see deprecation note above
/*!
\brief Empty construct.
*/
coll()
{
  //Default state: no storage, no elements, no dimensions.
  a = nullptr;
  Size = 0;
  dim = 0;
}
/*!
\brief Construct with \p d dimensions with \p n elements in each dimension and no initialization.
*/
coll(const indexer d, const indexer n):
nums(d, n),
sep(d)
{
indexer i, j;
//Total element count is n^d.  NOTE(review): fastpow presumably computes
//integer powers -- confirm against the library's utility headers.
Size = fastpow(n,d);
dim = d;
j = Size;  //redundant: j is re-initialized in the for-statement below
for (i = 0, j = Size; i < dim; ++i)
{
//Strides shrink by a factor of n per dimension: n^(d-1), ..., n, 1.
j /= n;
sep[i] = j;
};
//Elements are left uninitialized on purpose (no-initialization overload).
a = new Type[Size];
}
/*!
\brief Construct with \p d dimensions with \p n elements in each dimension,
with every element being initialized as \p def.
*/
coll(const indexer d, const indexer n, const Type &def):
nums(d, n),
sep(d)
{
indexer i, j;
//Total element count is n^d.  NOTE(review): fastpow presumably computes
//integer powers -- confirm against the library's utility headers.
Size = fastpow(n,d);
dim = d;
j = Size;  //redundant: j is re-initialized in the for-statement below
for (i = 0, j = Size; i < dim; ++i)
{
//Strides shrink by a factor of n per dimension: n^(d-1), ..., n, 1.
j /= n;
sep[i] = j;
};
a = new Type[Size];
//Fill every element with the default value, in parallel when OpenMP is on.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (indexer k = 0; k < Size; ++k)
{
a[k] = def;
}
}
/*!
\brief Construct with \p d dimensions
with the number of elements in each dimension being given by \p ns, with no initialization.
*/
coll(const indexer d, const point<indexer, indexer> &ns) :
nums(ns),
sep(d)
{
indexer i, j;
dim = d;
//Total element count is the product of the per-dimension counts.
Size = 1;
for (i = 0; i < d; ++i)
{
Size *=ns[i];
};
//Strides: stride of dimension i is the product of all later counts.
for (i = 0, j = Size; i < dim; ++i)
{
j /= ns[i];
sep[i] = j;
};
//Elements are left uninitialized on purpose (no-initialization overload).
a = new Type[Size];
}
/*!
\brief Construct with \p d dimensions
with the number of elements in each dimension being given by \p ns,
with every element being initialized as \p def.
*/
coll(const indexer d, const point<indexer, indexer> &ns, const Type &def) :
nums(ns),
sep(d)
{
indexer i, j;
dim = d;
//Total element count is the product of the per-dimension counts.
Size = 1;
for (i = 0; i < d; ++i)
{
Size *=ns[i];
};
//Strides: stride of dimension i is the product of all later counts.
for (i = 0, j = Size; i < dim; ++i)
{
j /= ns[i];
sep[i] = j;
};
a = new Type[Size];
//Fill every element with the default value, in parallel when OpenMP is on.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (indexer k = 0; k < Size; ++k)
{
a[k] = def;
}
}
/*!
\brief Copy constructor.
*/
coll(const coll &c):
nums(c.nums), sep(c.sep)
{
Size = c.Size;
dim = c.dim;
a = new Type[c.Size];
//NOTE(review): memcpy assumes Type is trivially copyable; element types
//with nontrivial copy semantics would need an element-wise copy -- confirm
//how coll is instantiated elsewhere.
memcpy(a, c.a, (c.Size) * sizeof(Type));
}
/*!
  \brief Move constructor: steals the array from \p c, leaving it empty.
*/
coll(coll &&c):
nums(c.nums),
sep(c.sep)
{
Size = c.Size;
dim = c.dim;
a = c.a;
//Leave the moved-from object in a safe, empty state.
c.a = nullptr;
c.Size = 0;
c.dim = 0;
//Fix: the original ended with `c.a.set_all(0);`, which calls a member
//function on a raw pointer (Type *) and is ill-formed -- it cannot compile
//once this constructor is instantiated.  Nulling the pointer and zeroing
//the sizes is all the moved-from state needs.
}
//!Copy assignment: deep-copies the array, guarding against self-assignment.
coll& operator= (const coll& c)
{
//Self-assignment would delete the source array before copying it.
if(&c == this)
{
return (*this);
}
nums = c.nums;
sep = c.sep;
Size = c.Size;
dim = c.dim;
//Release the old storage before taking the new size.
if (a != nullptr)
{
delete[] a;
}
a = new Type[c.Size];
//NOTE(review): memcpy assumes Type is trivially copyable -- see copy ctor.
memcpy(a, c.a, (c.Size) * sizeof(Type));
return (*this);
}
//!Move assignment: steals the array from \p c, leaving it empty.
coll& operator= (coll&& c)
{
//Self-move would null our own array below.
if(&c == this)
{
return (*this);
}
nums = c.nums;
sep = c.sep;
Size = c.Size;
dim = c.dim;
//Release the old storage before adopting the new pointer.
if (a != nullptr)
{
delete[] a;
}
a = c.a;
//Fix: also empty the moved-from object.  Previously only the pointer was
//nulled, so c.size() kept reporting its old nonzero count while c's array
//was null -- any subsequent access through c would dereference nullptr.
c.a = nullptr;
c.Size = 0;
c.dim = 0;
return (*this);
}
/*!
\brief Access operator through an index.
*/
inline Type& operator[](const indexer i) { return a[i]; }  //unchecked access
/*!
\brief Access operator through an index.
*/
inline const Type& operator[](const indexer i) const { return a[i]; }  //unchecked access
/*!
\brief Bounded access operator through an index.
\throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception
to possibly throw a g24_lib::exceptions::out_of_bounds_access.
*/
inline Type& operator()(const indexer i)
{
//Bounds check; out-of-range indices go through the library's exception
//machinery.  NOTE(review): throw_exception presumably honours the
//library-wide exception policy (throw/warn/ignore) -- confirm.
if (i >= Size)
{
exceptions::throw_exception(exceptions::out_of_bounds_access("Coll deprecated, but it's accessing outside."));
}
return a[i];
}
/*!
\brief Bounded access operator through an index.
\throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception
to possibly throw a g24_lib::exceptions::out_of_bounds_access.
*/
inline const Type& operator()(const indexer i) const
{
//Bounds check; out-of-range indices go through the library's exception
//machinery.  NOTE(review): throw_exception presumably honours the
//library-wide exception policy (throw/warn/ignore) -- confirm.
if (i >= Size)
{
exceptions::throw_exception(exceptions::out_of_bounds_access("Coll deprecated, but it's accessing outside."));
}
return a[i];
}
/*!
\brief Converts from an ordered position to an index.
\remark In essence, this is the dot product between the ordered position
(understood as a vector) and the stride at each dimension (as a vector as well).
\remark To understand the algorithm, take for example a 3d collection of elements with
`{11, 12, 13}` in each dimension. The strides will be `{156,13,1}`.
\remark Suppose one wants to find the index that corresponds to the position `{4,5,6}`.
Given the way positions are translated into indexes, one needs to compute:
`4 * 156 + 5 * 13 + 1 * 6 = 624 + 65 + 6 = 695`.
\sa g24_lib::coll::to_point
*/
inline indexer to_elem(const point<indexer, indexer> &p) const
{
//Dot product of the position with the per-dimension strides.
indexer acc = 0;
for (indexer d = 0; d < dim; ++d)
{
acc += p[d] * sep[d];
}
return acc;
}
/*!
\brief Access operator with an ordered position. Has worse performance than through an index.
\sa g24_lib::coll::to_elem
*/
inline Type& operator[] (const point<indexer, indexer> p) { return (*this)[to_elem(p)]; }  //position form
/*!
\brief Access operator with an ordered position. Has worse performance than through an index.
\sa g24_lib::coll::to_elem
*/
inline const Type& operator[] (const point<indexer, indexer> p) const { return (*this)[to_elem(p)]; }  //position form
/*!
\brief Bounded access operator with an ordered position. Has worse performance than through an index.
\throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception
to possibly throw a g24_lib::exceptions::out_of_bounds_access.
\sa g24_lib::coll::to_elem
*/
inline Type& operator() (const point<indexer, indexer> p) { return (*this)(to_elem(p)); }  //bounded position form
/*!
\brief Bounded access operator with an ordered position. Has worse performance than through an index.
\throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception
to possibly throw a g24_lib::exceptions::out_of_bounds_access.
\sa g24_lib::coll::to_elem
*/
inline const Type& operator() (const point<indexer, indexer> p) const { return (*this)(to_elem(p)); }  //bounded position form
/*!
\brief Converts from an index to an ordered position.
\remark To understand the algorithm, take for example a 3d collection of elements with
`{11, 12, 13}` in each dimension. The strides will be `{156,13,1}`.
\remark Suppose one wants to find the ordered position that corresponds to the index 123.
Given the way positions are translated into indexes, one needs to compute:
#- `123 / 156 = 0` (in integer arithmetic), `123 % 156 = 123`:
The first component of the position is 0, we proceed with the number `123`.
#- `123 / 13 = 9`, `123 % 13 = 6`: The second component of the position is 9,
we proceed with the number `6`
#- `6 / 1 = 6`: The third and last component of the position is `6`.
\sa g24_lib::coll::to_elem
*/
inline point<indexer, indexer> to_point(const indexer elem) const
{
//Successive division by the strides: quotient is the coordinate,
//remainder carries over to the next (finer) dimension.
point<indexer, indexer> res(dim);
indexer remainder = elem;
for (indexer d = 0; d < dim; ++d)
{
res[d] = remainder / sep[d];
remainder %= sep[d];
}
return res;
}
/*!
\brief Returns the elements of the `coll` that are direct neighbors to the specified element.
\param elem The index of the element whose neighbors will be returned.
\param p The ordered position of the element whose neighbors will be returned.
\pre \p elem and \p p must obviously refer to the same element.
\returns An `std::vector` containing the neighbors of the specified element.
For an element with an ordered position `(a,b,...,d)`,
the return vector will be ordered as:
`(a+1,b,...,d)`,`(a-1,b,...,d)`,`(a,b+1,...,d)`,`(a,b-1,...,d)`,...,`(a,b,...,d-1)`,
with any invalid positions being omitted
(if the specified element is in the border of the collection).
\remark Of course, this is extremely inefficient for large types, which is another reason
for this class to be deprecated...
*/
std::vector<Type> get_near(const indexer elem, const point<indexer, indexer> &p) const
{
indexer i, j = 0;
std::vector<Type> ret;
ret.reserve(dim*2);
for (i = 0; i < dim; ++i)
{
//Fix: the bounds tests were swapped.  The +1 neighbour was guarded by
//p[i] > 0 (wrong side: it allowed an out-of-bounds read when p[i] is at
//the upper border) and the -1 neighbour additionally required
//p[i] < nums[i]-1 (wrongly dropping a valid neighbour at the upper
//border).  The documented order -- +1 neighbour first, then -1, with
//invalid positions omitted -- is preserved.
if (p[i] < nums[i] - 1)
{
ret.push_back(a[elem + sep[i]]);
++j;
}
if (p[i] > 0)
{
ret.push_back(a[elem - sep[i]]);
++j;
}
}
ret.shrink_to_fit();
return ret;
}
/*!
\brief Returns the elements of the `coll` that are direct neighbors to the specified element.
\param p The ordered position of the element whose neighbors will be returned.
\returns An `std::vector` containing the neighbors of the specified element.
For an element with an ordered position `(a,b,...,d)`,
the return vector will be ordered as:
`(a+1,b,...,d)`,`(a-1,b,...,d)`,`(a,b+1,...,d)`,`(a,b-1,...,d)`,...,`(a,b,...,d-1)`,
with any invalid positions being omitted
(if the specified element is in the border of the collection).
\remark Of course, this is extremely inefficient for large types, which is another reason
for this class to be deprecated...
*/
inline std::vector<Type> get_near(const point<indexer, indexer> &p) const { return get_near(to_elem(p), p); }  //delegate with computed index
/*!
\brief Returns the elements of the `coll` that are direct neighbors to the specified element.
\param elem The index of the element whose neighbors will be returned.
\returns An `std::vector` containing the neighbors of the specified element.
For an element with an ordered position `(a,b,...,d)`,
the return vector will be ordered as:
`(a+1,b,...,d)`,`(a-1,b,...,d)`,`(a,b+1,...,d)`,`(a,b-1,...,d)`,...,`(a,b,...,d-1)`,
with any invalid positions being omitted
(if the specified element is in the border of the collection).
\remark Of course, this is extremely inefficient for large types, which is another reason
for this class to be deprecated...
*/
inline std::vector<Type> get_near(const indexer elem) const
//Gets the points closest to the element (delegates with the computed position).
{
return get_near(elem, to_point(elem));
}
/*!
\brief Sets all of the elements in the border of the collection to \p v.
\remark When the authors finished writing the algorithm for this function,
though in retrospective it doesn't seem that impressive,
they were very glad to have been able to figure out a way
to select every element of the border in linear time
and without repetition.
*/
void set_border(const Type &v)
{
  indexer i,j, n;
  for (i = 0; i < dim; ++i)
    //i fixes the coordinate where we will consider 0 or max.
    {
      n = 0;
      for(j = 0; j < i; ++j)
        {
          n += sep[j];
        }
      //To start at (1, ... , 1, 0, ...) and avoid repetition.
      while(n < Size - sep[0])
        {
          //Each pass writes two opposite faces of dimension i at once:
          //the contiguous run at coordinate 0 (offset n) and the matching
          //run at coordinate nums[i]-1 (offset n + (nums[i]-1)*sep[i]).
#ifdef _OPENMP
#pragma omp parallel for
#endif
          for (indexer k = 0; k < sep[i]; ++k)
            {
              a[k+n] = v;
              a[k+n+(nums[i]-1)*sep[i]] = v;
            }
          if(i > 0)
            {
              //Advance to the next run of sep[i] elements.
              n += sep[i-1];
            }
          else
            {
              //i == 0 is covered in a single pass: force the while loop to end.
              n = Size;
            }
          //NOTE(review): this descending loop relies on indexer being an
          //unsigned type, so decrementing j past 0 wraps above i and ends
          //the loop - TODO confirm indexer is always unsigned.
          for(j = i - 1; j < i; --j)
            //This can come outside the conditional
            //since i = 0 invalidates j < i
            //and i = 1 is taken care of through Size - sep[0]
            {
              //Presumably skips runs lying on faces already handled by other
              //values of i, so each border element is written exactly once -
              //confirm against the ordering of sep[].
              if(j > 0 && n % sep[j-1] == 0)
                {
                  n += sep[j];
                }
              else if(j > 0 && (n % sep[j-1])/sep[j] == (nums[j]-1))
                {
                  n += 2*sep[j];
                }
            }
        }
    }
}
/*!
\brief Returns the number of elements in the border of the collection.
*/
indexer count_border() const
//Counts the number of elements of the border.
{
  indexer i, j, n, count = 0;
  for (i = 0; i < dim; ++i)
    //i fixes the coordinate where we will consider 0 or max.
    {
      n = 0;
      for(j = 0; j < i; ++j)
        {
          n += sep[j];
        }
      //To start at (1, ... , 1, 0, ...) and avoid repetition.
      while(n < Size - sep[0])
        {
          //Each pass accounts for one run of sep[i] elements on the
          //0-face and the matching run on the max-face of dimension i
          //(mirrors the iteration pattern of set_border).
          count += 2*sep[i];
          if(i > 0)
            {
              n += sep[i-1];
            }
          else
            {
              //i == 0 is covered in a single pass: force the while loop to end.
              n = Size;
            }
          //NOTE(review): relies on indexer being unsigned so that the
          //decrement past 0 wraps and terminates the loop - TODO confirm.
          for(j = i - 1; j < i; --j)
            //This can come outside the conditional
            //since i = 0 invalidates j < i
            //and i = 1 is taken care of through Size - sep[0]
            {
              if(j > 0 && n % sep[j-1] == 0)
                {
                  n += sep[j];
                }
              else if(j > 0 && (n % sep[j-1])/sep[j] == (nums[j]-1))
                {
                  n += 2*sep[j];
                }
            }
        }
    }
  return count;
}
/*
void operate_on_border(Type (*f) (const Type &, const indexer, const coll<Type, indexer> &, void*), void *par = nullptr)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (indexer i = 0; i < Size; ++i)
{
a[i] = f(a[i], i, (*this), par);
}
}
void operate_on_all(Type (*f) (const Type &, const point<indexer, indexer> &, const coll<Type, indexer> &, void *), void *par = nullptr)
{
#ifdef _OPENMP
#pragma omp parallel for
for (indexer i = 0; i < Size; ++i)
{
point<indexer, indexer> p = to_point (i);
a[i] = f(a[i], p, (*this), par);
}
#else
point<indexer, indexer> p(dim,0);
indexer i;
for (i = 0; i < Size; ++i)
{
a[i] = f(a[i], p, (*this), par);
p.inc_with_wrap(nums, dim-1, true, true);
}
#endif
}
*/
/*!
\brief Checks if an element belongs to the border of the collection.
*/
inline bool is_border(const point <indexer, indexer> &p) const
{
indexer i;
for(i = 0; i < dim; ++i)
{
if (p[i] == 0 || p[i] == nums[i] - 1)
{
return true;
}
}
return false;
}
/*!
\brief Checks if an element belongs to the border of the collection.
*/
inline bool is_border(const indexer elem) const
{
return is_border(to_point(elem));
}
/*!
\brief Operates on all elements of the collection.
\param f A function that takes the current element, its index, a reference to the current `coll`
and an extra `void *` as arguments and returns the new element.
\param par A pointer to any extra arguments.
\remark This heavily foregoes type safety and leans too much towards C and not C++.
The authors blame that on their lack of experience with the language at the point this was written.
`g24_lib::ndview` implements this in a more generic and idiomatic way,
using template parameter packs and other C++ syntax goodies.
*/
inline void operate_on_all(Type (*f) (const Type &, const indexer, const coll<Type, indexer> &, void*), void *par = nullptr)
{
  //Apply f to every element in place, handing it the current value,
  //the flat index, the collection itself and the caller's extra data.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (indexer idx = 0; idx < Size; ++idx)
    {
      a[idx] = f(a[idx], idx, (*this), par);
    }
}
/*!
\brief Operates on all elements of the collection.
\param f A function that takes the current element, its ordered position, a reference to the current `coll`
and an extra `void *` as arguments and returns the new element.
\param par A pointer to any extra arguments.
\remark This heavily foregoes type safety and leans too much towards C and not C++.
The authors blame that on their lack of experience with the language at the point this was written.
`g24_lib::ndview` implements this in a more generic and idiomatic way,
using template parameter packs and other C++ syntax goodies.
*/
inline void operate_on_all(Type (*f) (const Type &, const point<indexer, indexer> &, const coll<Type, indexer> &, void *), void *par = nullptr)
//Applies f to every element in place, passing its ordered position
//instead of its flat index.
{
#ifdef _OPENMP
  //Parallel path: each iteration independently recomputes the position
  //from the index, so iterations carry no shared state.
#pragma omp parallel for
  for (indexer i = 0; i < Size; ++i)
    {
      point<indexer, indexer> p = to_point (i);
      a[i] = f(a[i], p, (*this), par);
    }
#else
  //Serial path: incrementally advance a single position with wrap-around
  //instead of recomputing to_point on every iteration.
  point<indexer, indexer> p(dim,(indexer) 0);
  indexer i;
  for (i = 0; i < Size; ++i)
    {
      a[i] = f(a[i], p, (*this), par);
      p.inc_with_wrap(nums, dim-1, true, true);
    }
#endif
}
/*!
\brief Sets all the elements with coordinate \p coord in dimension \p dms to \p v.
*/
void set_with_specific_coord(const Type &v, const indexer dms, const indexer coord)
//Sets every element whose coordinate in dimension dms equals coord to v.
{
  //n is the offset of the first run with the requested coordinate;
  //j advances from one such run to the next.
  indexer j = 0, n = coord*sep[dms];
  while(n + j + sep[dms] - 1 < Size)
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
      //Each pass writes one contiguous run of sep[dms] elements that all
      //share the fixed coordinate in dimension dms.
      for (indexer i = 0; i < sep[dms]; ++i)
        {
          a[i + n + j] = v;
        }
      if(dms > 0)
        {
          //Jump to the next run; presumably sep[dms-1] is the coarser
          //stride - TODO confirm the ordering convention of sep[].
          j += sep[dms - 1];
        }
      else
        {
          //For dms == 0 there is a single run: we are done.
          return;
        }
    }
}
~coll()
//Releases the heap array that backs the collection.
{
  delete[] a;
}
/*!
\brief Prints a collection in a roughly understandable format.
\warning For proper serialization, use `textual_output`.
*/
friend std::ostream& operator<< (std::ostream &s, const coll<Type, indexer> &c)
//Prints a collection.
//Not necessarily a way of storing the collection,
//rather meant for a visually understandable output.
{
  indexer i;
  s << "\n";
  if (c.Size)
    {
      for(i = 0; i < c.size(); ++i)
        {
          //At the start of each 2-D slab, print a header with the
          //remaining coordinates.
          if(c.dim > 2 && i % c.separation(c.dim - 3) == 0)
            {
              //NOTE(review): (c.to_point(i) >> 2) presumably drops the two
              //fastest-varying coordinates - confirm point::operator>>.
              s << "\n\n" << (c.to_point(i) >> 2) << "\n\n";
            }
          //Line break at the start of each row.
          if(c.dim > 1 && i % c.separation(c.dim-2) == 0)
            {
              s << "\n";
            }
          s << c.a[i] << " ";
        }
    }
  return s;
}
/*!
\brief Prints a `coll` to a stream in a format that can be read back later.
\details Prints the number of dimensions,
followed by the number of elements in each dimension,
followed by the stride in each dimension,
then the total size and finally the elements,
all with `operator<<`.
\deprecated Old way of printing the contents of the `coll`,
provided for backwards compatibility reasons.
Please use `textual_output` for proper serialization.
*/
template <class stream>
void raw_print(stream &s)
//Prints: dim, the per-dimension counts (nums), the strides (sep),
//the total size and then every element, all separated by spaces.
{
  s << dim << " ";
  nums.raw_print(s, false);
  s << " ";
  sep.raw_print(s, false);
  //NOTE(review): no explicit separator is written between sep's output
  //and Size (unlike after nums); confirm raw_print(s, false) emits a
  //trailing separator, otherwise the two values are fused in the output.
  s << Size << " ";
  for(indexer i = 0; i < Size; ++i)
    {
      s << a[i] << " ";
    }
}
/*!
\brief Reads a `coll` in the format written by `raw_print`.
\details Reads the number of dimensions,
followed by the number of elements in each dimension,
followed by the stride in each dimension,
then the total size and finally the elements,
all with `operator>>`.
\deprecated Old way of reading the contents of the `coll`,
provided for backwards compatibility reasons.
Please use `textual_input` for proper serialization.
*/
template <class stream>
void raw_read(stream &s)
//Reads back what raw_print wrote: dim, the per-dimension counts,
//the strides, the total size and then every element.
{
  s >> dim;
  nums.raw_read(s, dim);
  sep.raw_read(s, dim);
  indexer temp;
  s >> temp;
  //Only reallocate when the stored size differs from the current one.
  if (temp != Size)
    {
      delete[] a;
      a = new Type[temp];
      Size = temp;
    }
  for(indexer i = 0; i < Size; ++i)
    {
      s >> a[i];
    }
}
template<class stream, class str = std::basic_string<typename stream::char_type>>
void textual_output(stream &s, const str& separator = " ") const
//Serializes the collection in textual form: first the per-dimension
//sizes (nums), then every element, each followed by the separator.
{
  g24_lib::textual_output(s, nums, separator);
  s << separator;
  for (indexer i = 0; i < Size; ++i)
    {
      g24_lib::textual_output(s, a[i], separator);
      s << separator;
    }
}
template<class stream>
void textual_input(stream &s)
//Reads a collection previously serialized with textual_output.
{
  //NOTE(review): nums is declared elsewhere as point<indexer, indexer>,
  //but this reads a point<Type, indexer> - confirm the intended element
  //type of new_nums.
  point<Type, indexer> new_nums;
  g24_lib::textual_input(s, new_nums);
  if(s.fail())
    {
      //The sizes could not be read: bail out before allocating anything.
      exceptions::throw_exception(exceptions::array_length_misread(""));
    }
  //Build a collection with the sizes just read and fill it element by
  //element; only assign to *this once everything has been read.
  coll temp(new_nums.size(), new_nums);
  for (indexer i = 0; i < temp.size(); ++i)
    {
      g24_lib::textual_input(s, temp.a[i]);
    }
  (*this) = temp;
}
template<class stream>
void binary_output(stream &s) const
//Serializes the collection in binary form: the per-dimension sizes
//(nums) followed by every element.
{
  g24_lib::binary_output(s, nums);
  for (indexer i = 0; i < Size; ++i)
    {
      g24_lib::binary_output(s, a[i]);
    }
}
template<class stream>
void binary_input(stream &s)
//Reads a collection previously serialized with binary_output.
{
  //NOTE(review): as in textual_input, this reads a point<Type, indexer>
  //while nums is a point<indexer, indexer> - confirm intended type.
  point<Type, indexer> new_nums;
  g24_lib::binary_input(s, new_nums);
  if(s.fail())
    {
      //The sizes could not be read: bail out before allocating anything.
      exceptions::throw_exception(exceptions::array_length_misread(""));
    }
  //Build a collection with the sizes just read and fill it element by
  //element; only assign to *this once everything has been read.
  coll temp(new_nums.size(), new_nums);
  for (indexer i = 0; i < temp.size(); ++i)
    {
      //Fix: the element array member is named 'a' (as used by
      //textual_input and the rest of the class); 'arr' does not exist.
      g24_lib::binary_input(s, temp.a[i]);
    }
  (*this) = temp;
}
/*!
\brief An old kludge to enable output of `colls` of `g24_lib::fspoints`
way before `serialization_helpers.h` was written.
\deprecated Currently, it simply calls `g24_lib::coll::binary_output`
since this already works with `g24_lib::fspoints`,
but the function is kept here for backwards compatibility.
*/
template <class stream>
friend void binary_output_special(stream &s, const coll &c)
//Kept only for backwards compatibility: forwards to binary_output,
//which already handles the element types this kludge was written for.
{
  c.binary_output(s);
  /*
  s.write( (char*) &c.dim, sizeof(indexer));
  s.write( (char*) c.nums.get_access_to_array(), c.dim*sizeof(indexer));
  s.write( (char*) c.sep.get_access_to_array(), c.dim*sizeof(indexer));
  s.write( (char*) &c.Size, sizeof(indexer));
  for(indexer i = 0; i < c.Size; ++i)
    {
      s.write( (char*) c.a[i].get_access_to_array(), c.a[i].size() * sizeof(c.a[i][0]));
    }
  */
}
/*!
\brief An old kludge to enable input of `colls` of `g24_lib::fspoints`
way before `serialization_helpers.h` was written.
\deprecated Currently, it simply calls `g24_lib::coll::binary_input`
since this already works with `g24_lib::fspoints`,
but the function is kept here for backwards compatibility.
*/
template <class stream>
friend void binary_input_special(stream &s, coll &c)
//Kept only for backwards compatibility: forwards to binary_input,
//which already handles the element types this kludge was written for.
{
  c.binary_input(s);
  /*
  s.read( (char*) &c.dim, sizeof(indexer));
  c.nums.assure_size(c.dim);
  s.read( (char*) c.nums.get_access_to_array(), c.dim*sizeof(indexer));
  c.sep.assure_size(c.dim);
  s.read( (char*) c.sep.get_access_to_array(), c.dim*sizeof(indexer));
  indexer temp;
  s.read( (char*) &temp, sizeof(indexer));
  if(temp != c.Size)
    {
      c.Size = temp;
      delete[] c.a;
      c.a = new Type[c.Size];
    }
  for(indexer i = 0; i < c.Size; ++i)
    {
      s.read( (char*) c.a[i].get_access_to_array(), c.a[i].size() * sizeof(c.a[i][0]));
    }
  */
}
};
}
#endif
|
matmul.c |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
//#include <malloc.h>
#if OMP == 1
#include <omp.h>
#endif
#ifndef _N_
#define _N_ 8192
#endif
#define MUL(x,y) ((x)*(y))
#ifndef VERIFICATION
#define VERIFICATION 0
#endif
#ifndef TRANSPOSE_Bs
#define TRANSPOSE_Bs 0
#endif
#ifndef HOST_MEM_ALIGNMENT
#define HOST_MEM_ALIGNMENT 1
#endif
#if HOST_MEM_ALIGNMENT == 1
#define AOCL_ALIGNMENT 64
#endif
#ifndef DEBUG_PRINT
#define DEBUG_PRINT 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifdef _OPENARC_
#if BLOCK_SIZE == 4
#pragma openarc #define BLOCK_SIZE 4
#elif BLOCK_SIZE == 8
#pragma openarc #define BLOCK_SIZE 8
#elif BLOCK_SIZE == 16
#pragma openarc #define BLOCK_SIZE 16
#elif BLOCK_SIZE == 32
#pragma openarc #define BLOCK_SIZE 32
#elif BLOCK_SIZE == 64
#pragma openarc #define BLOCK_SIZE 64
#endif
#endif
/* Multiplies two single-precision values; identical to the MUL macro,
   which expands to ((x)*(y)). */
float mul(float x, float y)
{
  return x * y;
}
/* Returns the current wall-clock time in seconds, with microsecond
   resolution, for simple elapsed-time measurements. */
double my_timer ()
{
  struct timeval now;
  gettimeofday (&now, 0);
  return now.tv_sec + now.tv_usec / 1000000.0;
}
int main(int argc, char **argv)
{
  int iter;
  int i, j;
  int num_iterations = 1;
  int bx, by, tx, ty;
  int wA, wB;
  float *A;
  float *B;
  float *GPU_C;
  float *CPU_C;
#if DEBUG_PRINT == 1
  float dSum = 0;
#endif
#if HOST_MEM_ALIGNMENT == 1
  void *p;
#endif
  double strt_time, done_time;
  printf("Matrix Multiplication\n");
  printf("width x height = %d x %d\n", _N_, _N_);
  printf("Iterations = %d\n", num_iterations);
  wA = _N_;
  wB = _N_;
  /* Allocate the four _N_ x _N_ matrices, 64-byte aligned when
     HOST_MEM_ALIGNMENT is enabled.
     NOTE(review): neither posix_memalign's return code nor malloc's
     result is checked before use - TODO verify allocations. */
#if HOST_MEM_ALIGNMENT == 1
  posix_memalign(&p, AOCL_ALIGNMENT, _N_*_N_*sizeof(float));
  A = (float *)p;
  posix_memalign(&p, AOCL_ALIGNMENT, _N_*_N_*sizeof(float));
  B = (float *)p;
  posix_memalign(&p, AOCL_ALIGNMENT, _N_*_N_*sizeof(float));
  GPU_C = (float *)p;
  posix_memalign(&p, AOCL_ALIGNMENT, _N_*_N_*sizeof(float));
  CPU_C = (float *)p;
#else
  A = (float *)malloc(sizeof(float)*_N_*_N_);
  B = (float *)malloc(sizeof(float)*_N_*_N_);
  GPU_C = (float *)malloc(sizeof(float)*_N_*_N_);
  CPU_C = (float *)malloc(sizeof(float)*_N_*_N_);
#endif
  /* initialize matrix A and B */
  /* A is the identity matrix and B[i][j] = i+j, so the product A*B
     reproduces B - convenient for the verification below. */
  {
    for (i = 0; i < _N_; i++) {
      for (j = 0; j < _N_; j++) {
        if (i == j)
          A[i*_N_+j] = 1.0f;
        else
          A[i*_N_+j] = 0.0f;
        B[i*_N_+j] = i+j;
        GPU_C[i*_N_+j] = 0.0f;
      }
    }
  }
  printf("Starting with gpu run\n");
  strt_time = my_timer ();
  /* Tiled matrix multiply offloaded with OpenACC/OpenARC directives:
     each (by, bx) gang pair computes one BLOCK_SIZE x BLOCK_SIZE tile
     of GPU_C, staging tiles of A and B in fast local memory. */
  for (iter = 0; iter < num_iterations; iter++) {
#pragma openarc opencl num_simd_work_items(2)
#pragma acc kernels loop copyout(GPU_C[0:_N_*_N_]) copyin(A[0:_N_*_N_],B[0:_N_*_N_]) gang(_N_/BLOCK_SIZE)
    for(by = 0; by < (_N_/BLOCK_SIZE); by++) {
#pragma acc loop gang(_N_/BLOCK_SIZE)
      for(bx = 0; bx < (_N_/BLOCK_SIZE); bx++) {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        float Bs[BLOCK_SIZE][BLOCK_SIZE];
#pragma acc loop worker(BLOCK_SIZE)
        for(ty = 0; ty < BLOCK_SIZE; ty++) {
#pragma acc loop worker(BLOCK_SIZE)
          for(tx = 0; tx < BLOCK_SIZE; tx++) {
            //Index of the first sub-matrix of A processed by the block
            int aBegin = wA * BLOCK_SIZE * by;
            // Index of the last sub-matrix of A processed by the block
            int aEnd = aBegin + wA - 1;
            // Step size used to iterate through the sub-matrices of A
            int aStep = BLOCK_SIZE;
            // Index of the first sub-matrix of B processed by the block
            int bBegin = BLOCK_SIZE * bx;
            // Step size used to iterate through the sub-matrices of B
            int bStep = BLOCK_SIZE * wB;
            // Csub is used to store the element of the block sub-matrix
            // that is computed by the thread
            float Csub = 0;
            // Loop over all the sub-matrices of A and B
            // required to compute the block sub-matrix
            for (int a = aBegin, b = bBegin;
                 a <= aEnd;
                 a += aStep, b += bStep) {
              // Load the matrices from device memory
              // to shared memory; each thread loads
              // one element of each matrix
              As[ty][tx] = A[a + wA * ty + tx];
#if TRANSPOSE_Bs == 0
              Bs[ty][tx] = B[b + wB * ty + tx];
#else
              // Transposed staging of Bs (layout experiment selected at
              // compile time; the k-loop below indexes it accordingly).
              Bs[tx][ty] = B[b + wB * ty + tx];
#endif
              // Synchronize to make sure the matrices are loaded
              //#pragma acc barrier
#pragma acc barrier(acc_mem_fence_local)
              // Multiply the two matrices together;
              // each thread computes one element
              // of the block sub-matrix
#pragma unroll
              for (int k = 0; k < BLOCK_SIZE; ++k) {
#if TRANSPOSE_Bs == 0
                Csub += As[ty][k] * Bs[k][tx];
#else
                Csub += As[ty][k] * Bs[tx][k];
#endif
              }
              // Synchronize to make sure that the preceding
              // computation is done before loading two new
              // sub-matrices of A and B in the next iteration
              //#pragma acc barrier
#pragma acc barrier(acc_mem_fence_local)
            }
            // Write the block sub-matrix to device memory;
            // each thread writes one element
            int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
            GPU_C[c + wB * ty + tx] = Csub;
          }
        }
      }
    }
  }
  done_time = my_timer ();
  printf("Done with gpu run\n");
  printf ("Accelerator Elapsed time = %lf sec\n", done_time - strt_time);
#if VERIFICATION == 1
  printf("Starting with cpu run\n");
  strt_time = my_timer ();
  /* verification */
  /* Reference triple-loop multiply on the host, parallelized over rows. */
  for (iter = 0; iter < num_iterations; iter++) {
    int i;
#pragma omp parallel for shared(A,B,CPU_C) private(i)
    for (i = 0; i < _N_; i++) {
      int j;
      for (j = 0; j < _N_; j++) {
        int k;
        float sum = 0.0f;
        for (k = 0; k < _N_; k++) {
          sum += mul(A[i*_N_+k],B[k*_N_+j]);
        }
        CPU_C[i*_N_+j] = sum;
      }
    }
  }
  done_time = my_timer ();
  printf("Done with cpu run\n");
  printf ("Reference CPU time = %lf sec\n", done_time - strt_time);
  /* Exact float comparison: presumably safe here because A is the
     identity matrix, so both sides accumulate the same values -
     TODO confirm if A's initialization ever changes. */
  for (i = 0; i < _N_; i++) {
    for (j = 0; j < _N_; j++) {
      if (CPU_C[i*_N_+j] != GPU_C[i*_N_+j]) {
        printf("Verification: Failed\n");
        printf("CPU_C = %f\tGPU_C = %f\n", CPU_C[i*_N_+j], GPU_C[i*_N_+j]);
        return 1;
      }
    }
  }
  printf("Verification: Successful\n");
#endif
#if DEBUG_PRINT == 1
  /* Quick checksum: sum of GPU_C's diagonal. */
  for(i=0; i<_N_; i++) {
    dSum += GPU_C[i*_N_+i];
  }
  printf("Diagonal Sum of GPU_C = %f\n", dSum);
#endif
  free(A);
  free(B);
  free(GPU_C);
  free(CPU_C);
  return 0;
}
|
2.norace7.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
  double A[N], B[N], sum0 = 0.0, sum1 = 0.0;
#pragma omp parallel
  {
    /* Initialize A and B; the worksharing loop distributes iterations
       across the team and its implicit barrier orders the writes
       before the reads below. */
#pragma omp for
    for (int i = 0; i < N; i++) {
      A[i] = i;
      B[i] = i * i;
    }
    /* NOTE(review): every thread of the parallel region executes this
       simd loop; the reduction clause here is vector-level only -
       confirm this is the intended pattern for the race-analysis test. */
#pragma omp simd reduction(+ : sum0)
    for (int i = 0; i < N; i++) {
      sum0 += A[i] * B[i];
    }
  }
  /* Sequential reference: sum of i^3 should match sum of A[i]*B[i]. */
  for (int i = 0; i < N; i++) {
    sum1 += i * i * i;
  }
  /* Zero (success) when the parallel and sequential sums agree. */
  return (sum1 - sum0);
}
// Printing in reverse. Need to fix it.
// CHECK: Region is Data Race Free.
// CHECK: Region is Data Race Free.
// END
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by he composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color divided by Source alpha
Dca = Dc*Da normalized Dest color divided by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' in in the follow formula as 'gamma' The resulting alpla value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also define that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (syncronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independantally of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace: pick the two dominant channels from
    the hue sextant, then shift all channels by m to match the luma.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 6.0))
    switch ((int) h)
    {
      case 0:
      {
        r=c;
        g=x;
        break;
      }
      case 1:
      {
        r=x;
        g=c;
        break;
      }
      case 2:
      {
        g=c;
        b=x;
        break;
      }
      case 3:
      {
        g=x;
        b=c;
        break;
      }
      case 4:
      {
        r=x;
        b=c;
        break;
      }
      case 5:
      {
        r=c;
        b=x;
        break;
      }
      default:
        break;
    }
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace: chroma is the channel range, hue the
    position within the dominant channel's sextant, luma a weighted sum
    of the three channels.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));
  h=0.0;  /* achromatic (c == 0) keeps hue at 0 */
  if (c != 0)
    {
      if (red == max)
        h=fmod((g-b)/c+6.0,6.0);
      else if (green == max)
        h=((b-r)/c)+2.0;
      else if (blue == max)
        h=((r-g)/c)+4.0;
    }
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
/*
  CompositeOverImage() is the fast path for the Porter-Duff "over"
  operator: each row of the canvas is composited with the corresponding
  source row (offset by x_offset/y_offset), rows being processed in
  parallel when OpenMP is available.
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.  (Fix: status and progress were initialized twice;
    the redundant second pair of assignments has been removed.)
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        /*
          Rows outside the source's vertical extent are left untouched.
        */
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /*
          For a negative x_offset, start reading the source row at the
          column that lines up with canvas column 0.
        */
        if (x_offset < 0)
          p-=x_offset*(ssize_t) GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      NOTE(review): canvas_pixel and source_pixel are initialized but
      never read below - candidates for removal; kept to stay close to
      the upstream code.
    */
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /*
              Outside the overlay the canvas keeps its color but becomes
              fully transparent in the alpha channel.
            */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa:  normalized source alpha.
          Da:  normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=ClampToQuantum(Sc);
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      /*
        Wrap the source pointer at the end of the source row so the
        source tiles horizontally across the canvas.
      */
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
if ((source_image->alpha_trait == UndefinedPixelTrait) &&
(image->alpha_trait != UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(source_image,i);
PixelTrait source_traits = GetPixelChannelTraits(source_image,
channel);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((source_traits == UndefinedPixelTrait) ||
(traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
double
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling dictated by an overlay gradient map:
X = red_channel; Y = green_channel; compose:args =
x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
Users input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5 so to make this match the users
input the ellipse size needs to be doubled.
*/
width=2.0*geometry_info.rho;
height=width;
if ((flags & HeightValue) != 0)
height=2.0*geometry_info.sigma;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
/*
Rotate vectors if a rotation angle is given.
*/
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
/*
Lets set a angle range and calculate in the loop.
*/
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a gaussian cylindrical filter for EWA Bluring.
As the minimum ellipse radius of support*1.0 the EWA algorithm
can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
This means that even 'No Blur' will be still a little blurry! The
solution (as well as the problem of preventing any user expert filter
settings, is to set our own user settings, restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/*
Perform the variable blurring of each pixel in image.
*/
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
status=InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
if (status == MagickFalse)
break;
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
if (x < (ssize_t) source_image->columns)
break;
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*(ssize_t) GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
DcaDa,
Sa,
SaSca,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case FreezeCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case InterpolateCompositeOp:
case LightenCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case MultiplyCompositeOp:
case NegateCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ReflectCompositeOp:
case ScreenCompositeOp:
case SoftBurnCompositeOp:
case SoftDodgeCompositeOp:
case SoftLightCompositeOp:
case StampCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
case ModulusAddCompositeOp:
{
if ((Sa+Da) <= 1.0)
{
alpha=(Sa+Da);
break;
}
alpha=((Sa+Da)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sa-Da) >= 0.0)
{
alpha=(Sa-Da);
break;
}
alpha=((Sa-Da)+1.0);
break;
}
default:
{
alpha=1.0;
break;
}
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits = GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((channel == AlphaPixelChannel) &&
((traits & UpdatePixelTrait) != 0))
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case RMSECompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*fabs(Sa-Da);
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)*
PerceptibleReciprocal(Da));
if (pixel < 0.0)
pixel=0.0;
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25*
cos(MagickPI*Da));
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*Sa*Da;
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*((1.0-Sa-Da));
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sa+Da*Da-1.0);
break;
}
case StereoCompositeOp:
{
pixel=QuantumRange*(Sa+Da)/2;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
if (source_traits == UndefinedPixelTrait)
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=ClampToQuantum(Dc);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
SaSca=Sa*PerceptibleReciprocal(Sca);
DcaDa=Dca*PerceptibleReciprocal(Da);
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)*
SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) GetPixelBlack(source_image,p);
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case FreezeCompositeOp:
{
pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)*
PerceptibleReciprocal(Dca));
if (pixel < 0.0)
pixel=0.0;
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case InterpolateCompositeOp:
{
pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25*
cos(MagickPI*Dca));
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Abode Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Abode Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
as a command separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
if ((Sca+Dca) <= 1.0)
{
pixel=QuantumRange*(Sca+Dca);
break;
}
pixel=QuantumRange*((Sca+Dca)-1.0);
break;
}
case ModulusSubtractCompositeOp:
{
if ((Sca-Dca) >= 0.0)
{
pixel=QuantumRange*(Sca-Dca);
break;
}
pixel=QuantumRange*((Sca-Dca)+1.0);
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case NegateCompositeOp:
{
pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*Sca;
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case ReflectCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca));
if (pixel > QuantumRange)
pixel=QuantumRange;
break;
}
case RMSECompositeOp:
{
double
gray;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
gray=sqrt(
(canvas_pixel.red-source_pixel.red)*
(canvas_pixel.red-source_pixel.red)+
(canvas_pixel.green-source_pixel.green)*
(canvas_pixel.green-source_pixel.green)+
(canvas_pixel.blue-source_pixel.blue)*
(canvas_pixel.blue-source_pixel.blue)/3.0);
switch (channel)
{
case RedPixelChannel: pixel=gray; break;
case GreenPixelChannel: pixel=gray; break;
case BluePixelChannel: pixel=gray; break;
default: pixel=Dc; break;
}
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftBurnCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)*
PerceptibleReciprocal(Dca));
break;
}
case SoftDodgeCompositeOp:
{
if ((Sca+Dca) < 1.0)
pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca));
else
pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)*
PerceptibleReciprocal(Sca));
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa*
(4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)-
DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case StampCompositeOp:
{
pixel=QuantumRange*(Sca+Dca*Dca-1.0);
break;
}
case StereoCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)*
PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0*
(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The image must be DirectClass so individual pixels can be overwritten.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone so the caller's texture is never modified; the
    clone is converted to the canvas colorspace and set to tile virtually.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    Slow path: any compose operator that actually blends (anything other
    than Copy, or Over when alpha is involved) must go through
    CompositeImage() tile by tile.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* Composite one tile; tile_offset shifts the tiling origin. */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Final 100% progress tick before returning from the slow path. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).
    Fast path: plain pixel copy, parallelized one canvas row at a time.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the texture row that maps to canvas row y; the modulo wraps
      the texture vertically, tile_offset shifts the phase.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Repeat the texture row across the canvas row, clipping the last tile. */
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /* Copy only channels both images define (traits check). */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
|
udr-2.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
struct W { int w; };
void init (struct W *, int, int *);
int v;
#pragma omp declare reduction (foo : long int : omp_out |= v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : char : omp_out = v) /* { dg-error "combiner refers to variable" } */
typedef short T;
#pragma omp declare reduction (foo : T : omp_out += v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : int : v *= omp_in) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : struct W : omp_out.w *= omp_in.w + v) /* { dg-error "combiner refers to variable" } */
void
foo (int v)
{
  /* Block-scope re-declarations of the reductions above.  The combiner of a
     user-defined reduction may only reference omp_in/omp_out; each pragma
     below references the parameter 'v' instead, so every line must produce
     the "combiner refers to variable" diagnostic the dg-error comments
     expect.  Do not edit the pragma text: it is matched by the test harness. */
#pragma omp declare reduction (foo : long int : omp_out |= v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : char : omp_out = v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : T : omp_out += v) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : int : v *= omp_in) /* { dg-error "combiner refers to variable" } */
#pragma omp declare reduction (foo : struct W : omp_out.w *= omp_in.w + v) /* { dg-error "combiner refers to variable" } */
}
#pragma omp declare reduction (bar : long int : omp_out |= omp_in) initializer (omp_priv = v) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : char : omp_out += omp_in) initializer (omp_priv = ((char) v)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : T : omp_out += omp_in) initializer (omp_priv = (short) v) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : _Complex double : omp_out *= omp_in) initializer (omp_priv = (v)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : struct W : omp_out.w *= omp_in.w) initializer (omp_priv = { v } ) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar2 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, v, (int *) 0)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar3 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, 0, &v)) /* { dg-error "initializer refers to variable" } */
void
bar (int v)
{
  /* Same negative test as the file-scope pragmas, but inside a function:
     an initializer clause may only reference omp_priv/omp_orig; each one
     below references the parameter 'v', so every pragma must emit the
     "initializer refers to variable" diagnostic.  Pragma text is matched
     verbatim by the dg harness — leave it untouched. */
#pragma omp declare reduction (bar : long int : omp_out |= omp_in) initializer (omp_priv = v) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : char : omp_out += omp_in) initializer (omp_priv = ((char) v)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : T : omp_out += omp_in) initializer (omp_priv = (short) v) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : _Complex double : omp_out *= omp_in) initializer (omp_priv = (v)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar : struct W : omp_out.w *= omp_in.w) initializer (omp_priv = { v }) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar2 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, v, (int *) 0)) /* { dg-error "initializer refers to variable" } */
#pragma omp declare reduction (bar3 : struct W : omp_out.w *= omp_in.w) initializer (init (&omp_priv, 0, &v)) /* { dg-error "initializer refers to variable" } */
}
|
test_helper.h | //
// Created by liql2007 on 2020/12/23.
//
#ifndef EFANNA2E_TEST_HELPER_H
#define EFANNA2E_TEST_HELPER_H
#include <iostream>
#include <fstream>
#include <cassert>
#include <cstring>
#include <sys/stat.h>
#include <efanna2e/util.h>
#include <efanna2e/distance.h>
#include <efanna2e/neighbor.h>
// Print `size` elements of `vec` to stdout, space-separated, ending with a
// newline.  Purely diagnostic output; the data is not modified.
template<typename T>
void print_vector(const T* vec, unsigned size) {
  unsigned idx = 0;
  while (idx < size) {
    std::cout << vec[idx] << " ";
    ++idx;
  }
  std::cout << std::endl;
}
// Load an *vecs-format file (fvecs/ivecs/bvecs family): each record is a
// 4-byte little-endian dimension followed by `dim` values of type T.
// On return `data` points to a newly allocated [num x dim] row-major array
// (ownership passes to the caller — free with delete[]), `num` is the record
// count and `dim` the per-record dimensionality.  Exits the process on I/O
// failure.
// NOTE(review): assumes sizeof(T) == 4, since num is derived as
// fsize / ((dim + 1) * 4) — confirm before using with 8-byte element types.
template<typename T>
void load_data(const char* filename, T*& data, unsigned& num,
               unsigned& dim) {
  std::ifstream in(filename, std::ios::binary);
  if (!in.is_open()) {
    std::cout << "open file error" << std::endl;
    exit(-1);
  }
  // The first 4 bytes of the first record give the dimensionality shared by
  // every record in the file.
  in.read((char*)&dim, 4);
  // std::cout<<"data dimension: "<<dim<<std::endl;
  // Derive the record count from the file size: each record occupies
  // (dim + 1) 4-byte words (the +1 is the leading dimension field).
  in.seekg(0, std::ios::end);
  std::ios::pos_type ss = in.tellg();
  size_t fsize = (size_t)ss;
  num = (unsigned)(fsize / (dim + 1) / 4);
  data = new T[(size_t)num * (size_t)dim];

  // Second pass: skip each record's 4-byte dimension header and read the
  // payload straight into the flat output array.
  in.seekg(0, std::ios::beg);
  for (size_t i = 0; i < num; i++) {
    in.seekg(4, std::ios::cur);
    in.read((char*)(data + i * dim), dim * sizeof(T));
  }
  in.close();
}
// Serialize ragged rows in *vecs layout: every row is written as a 4-byte
// element count followed by that many T values.  Exits the process if the
// stream reports a write error.
template<typename T>
void save_data(const char* filename, std::vector<std::vector<T>>& results) {
  std::ofstream out(filename, std::ios::binary | std::ios::out);
  for (const auto& row : results) {
    const unsigned count = (unsigned)row.size();
    out.write((char*)&count, sizeof(unsigned));
    out.write((char*)row.data(), (size_t)count * sizeof(T));
  }
  if (out.bad()) {
    out.close();
    std::cerr << "write to file [" << filename << "] failed" << std::endl;
    exit(-1);
  }
  out.close();
}
// Serialize a dense [num x dim] row-major array in *vecs layout: each of the
// `num` rows is prefixed by the 4-byte dimension, then dim T values follow.
// Exits the process if the stream reports a write error.
template<typename T>
void save_data(const char* filename, const T* data, unsigned num, unsigned dim) {
  std::ofstream out(filename, std::ios::binary | std::ios::out);
  const size_t row_bytes = (size_t)dim * sizeof(T);
  for (size_t row = 0; row < num; ++row) {
    out.write((char*)&dim, sizeof(unsigned));
    out.write((char*)data + row * row_bytes, row_bytes);
  }
  if (out.bad()) {
    out.close();
    std::cerr << "write to file [" << filename << "] failed" << std::endl;
    exit(-1);
  }
  out.close();
}
// Ensure `dirPath` exists and is a directory, creating it (mode 0755) when
// absent.  Exits the process if the path exists but is not a directory, or
// if mkdir fails.  Fix: the error messages said "dictionary" instead of
// "directory".
inline void checkAndCreateDir(const char* dirPath) {
  struct stat sb;
  if (stat(dirPath, &sb) == 0) {
    // Path exists: it must already be a directory.
    if (!S_ISDIR(sb.st_mode)) {
      std::cerr << dirPath << " is not a directory" << std::endl;
      exit(-1);
    }
  } else if (mkdir(dirPath, 0755) != 0) {
    std::cerr << "create directory [" << dirPath << "] failed" << std::endl;
    exit(-1);
  }
}
// Loads and evaluates nearest-neighbor ground truth for recall measurement,
// and can generate exact (brute-force) ground truth for a random query subset.
struct GroundTruth {
  unsigned truthItemNum;  // neighbors stored per query in the truth file
  unsigned queryNum;      // number of queries in the truth file
  unsigned* data;         // [queryNum x truthItemNum] neighbor ids
                          // NOTE(review): allocated by load(), never freed here
  unsigned TOPK;          // recall cutoff (top-K)

  GroundTruth(unsigned TOPK) : TOPK(TOPK) {}

  // Read an ivecs-format ground-truth file into `data`.
  void load(const char* filename) {
    load_data(filename, data, queryNum, truthItemNum);
    std::cout << "ground truth query num: " << queryNum << std::endl;
    std::cout << "ground truth item num per query: " << truthItemNum << std::endl;
  }

  // Print the average recall@TOPK of `res` (per-query result id lists)
  // against the loaded truth.
  void recallRate(const std::vector<std::vector<unsigned>>& res) {
    assert(TOPK <= truthItemNum);
    assert(res.size() <= queryNum);
    float avgRecallVal = 0;
    for (size_t qi = 0; qi < res.size(); ++qi) {
      auto truth = data + qi * truthItemNum;
      unsigned recallNum = 0;
      // Count how many returned ids appear in the query's top-K truth set.
      for (auto docId : res[qi]) {
        for (unsigned j = 0; j < TOPK; ++j) {
          if (truth[j] == docId) {
            ++recallNum;
            break;
          }
        }
      }
      auto recallRateVal = (float) recallNum / TOPK;
      // recallRate.push_back(recallRateVal);
      avgRecallVal += recallRateVal;
    }
    auto recall = avgRecallVal / res.size();
    std::cout << "recall(top" << TOPK << ") : " << recall << std::endl;
  }

  // Pick `queryNum` random points from `vecData`, brute-force their exact
  // topK nearest neighbors (L2), and write the query vectors and their
  // neighbor-id lists to `queryPath` / `groundTruthPath`.
  static void createPartGroundTruth(const char* queryPath, const char* groundTruthPath,
                                    const float* vecData, unsigned pointNum, unsigned dim,
                                    unsigned queryNum, unsigned topK) {
    efanna2e::DistanceL2 distance;
    std::mt19937 rng(time(nullptr));
    std::vector<unsigned> queryIds(queryNum);
    efanna2e::GenRandom(rng, queryIds.data(), queryNum, pointNum);
    std::vector<std::vector<unsigned>> topNeighbors(queryNum);
    std::vector<float> qVecs((size_t)queryNum * dim);
#pragma omp parallel for
    for (size_t i = 0; i < queryNum; ++i) {
      auto qId = queryIds[i];
      // Seed the candidate pool with the query point itself (distance 0);
      // it is skipped in the scan below so it only occupies slot 0 until
      // real neighbors displace it.
      efanna2e::Neighbor nn(qId, 0, true);
      std::vector<efanna2e::Neighbor> neighborPool;
      neighborPool.reserve(topK + 1);
      neighborPool.resize(topK);
      neighborPool[0] = std::move(nn);
      unsigned poolSize = 1;
      auto q = vecData + (size_t)qId * dim;
      // Keep a copy of the query vector for the output query file.
      std::memcpy(qVecs.data() + i * dim, q, dim * sizeof(float));
      // Exhaustive scan: insert every other point into the bounded pool.
      for (size_t vId = 0; vId < pointNum; ++vId) {
        if (vId == qId) {
          continue;
        }
        auto v = vecData + vId * dim;
        float dist = distance.compare(v, q, dim);
        efanna2e::Neighbor nn(vId, dist, true);
        efanna2e::InsertIntoPool(neighborPool.data(), poolSize, nn);
        if (poolSize < topK) {
          ++poolSize;
        }
      }
      assert(poolSize == topK);
      std::sort(neighborPool.begin(), neighborPool.end(),
                [](const efanna2e::Neighbor& l, const efanna2e::Neighbor& r) {
                  return l.distance < r.distance; });
      auto& queryTopNeighbor = topNeighbors[i];
      queryTopNeighbor.reserve(topK);
      for (const auto& nn : neighborPool) {
        queryTopNeighbor.push_back(nn.id);
      }
    }
    save_data(groundTruthPath, topNeighbors);
    save_data(queryPath, qVecs.data(), queryNum, dim);
  }
};
// File paths and vector count for one partition of a sharded index.
struct PartInfo {
  size_t vecNum;                // vectors in this partition
  std::string docPath;          // partition vectors (.fvecs)
  std::string idPath;           // global id mapping (.data)
  std::string nsgPath;          // NSG graph file
  std::string knnPath;          // kNN graph file
  std::string queryPath;        // per-partition query vectors (.fvecs)
  std::string groundTruthPath;  // per-partition ground truth (.ivecs)
};
// Metadata for a set of index partitions stored under a common directory:
// knows every partition's file paths, and can round-trip the partition
// counts through a human-readable meta.txt.
// Fix: deserialize() error message said "dictionary" instead of "directory".
struct Partitions {
  std::vector<PartInfo> partInfos;  // one entry per partition
  size_t totalVecNum = 0;           // sum of partInfos[i].vecNum
  std::string dirPath;              // base directory, always '/'-terminated
  size_t dim;                       // vector dimensionality

  std::string getMetaPath() { return dirPath + "meta.txt"; }
  std::string getMergedNsgPath() { return dirPath + "merged.nsg"; }
  std::string getMergedVecPath() { return dirPath + "merged.fvecs"; }

  // Write partition counts to meta.txt ("label: value" per line).
  void serialize() {
    auto metaPath = getMetaPath();
    std::cout << "serialize partition meta to " << metaPath << std::endl;
    std::ofstream out(metaPath.c_str());
    out << "partition num: " << partInfos.size() << std::endl;
    out << "dimension: " << dim << std::endl;
    out << "total doc num: " << totalVecNum << std::endl;
    for (unsigned i = 0; i < partInfos.size(); ++i) {
      out << "partition_" << i + 1 << " doc num: " <<
          partInfos[i].vecNum << std::endl;
    }
    out.close();
  }

  // Read meta.txt from `dirPath` and rebuild partInfos (paths + counts).
  // Exits the process if the directory or meta file is missing.
  void deserialize(const char* dirPath) {
    struct stat sb;
    if (stat(dirPath, &sb) != 0 || !S_ISDIR(sb.st_mode)) {
      std::cerr << dirPath << " is not a directory" << std::endl;
      exit(-1);
    }
    std::string metaPath;
    if (dirPath[std::strlen(dirPath) - 1] != '/') {
      metaPath = dirPath + std::string("/meta.txt");
    } else {
      metaPath = dirPath + std::string("meta.txt");
    }
    std::ifstream in(metaPath.c_str());
    if (!in.is_open()) {
      std::cout << "open file " << metaPath << " failed" << std::endl;
      exit(-1);
    }
    // Each field is parsed as "everything up to ':'" then the value.
    std::string desc;
    std::getline(in, desc, ':');
    unsigned partNum;
    in >> partNum;
    std::cout << "partition num: " << partNum << std::endl;
    init(dirPath, partNum);
    std::getline(in, desc, ':');
    in >> dim;
    std::cout << "dim: " << dim << std::endl;
    std::getline(in, desc, ':');
    in >> totalVecNum;
    std::cout << "vector num: " << totalVecNum << std::endl;
    for (auto& part : partInfos) {
      std::getline(in, desc, ':');
      in >> part.vecNum;
      std::cout << "partition vector num: " << part.vecNum << std::endl;
    }
  }

  // Populate dirPath and the per-partition file names (1-based suffixes).
  // Does not touch the filesystem.
  void init(const char* dirName, unsigned partNum) {
    dirPath = dirName;
    if (dirPath[dirPath.length() - 1] != '/') {
      dirPath.append("/");
    }
    partInfos.clear();
    partInfos.reserve(partNum);
    for (unsigned i = 0; i < partNum; ++i) {
      auto docPath = dirPath + "docs_" + std::to_string(i + 1) + ".fvecs";
      auto idPath = dirPath + "ids_" + std::to_string(i + 1) + ".data";
      auto nsgPath = dirPath + "nng_" + std::to_string(i + 1) + ".nsg";
      auto knnPath = dirPath + "nng_" + std::to_string(i + 1) + ".knn";
      auto queryPath = dirPath + "query_" + std::to_string(i + 1) + ".fvecs";
      auto groundTruthPath = dirPath + "groundtruth_" + std::to_string(i + 1) + ".ivecs";
      PartInfo part{0, docPath, idPath, nsgPath, knnPath, queryPath, groundTruthPath};
      partInfos.emplace_back(std::move(part));
    }
  }

  // Create the directory if needed and return an initialized Partitions.
  static Partitions create(const char* dirPath, unsigned partNum) {
    checkAndCreateDir(dirPath);
    Partitions ret;
    ret.init(dirPath, partNum);
    return ret;
  }
};
#endif //EFANNA2E_TEST_HELPER_H
|
drupal7_fmt_plug.c | /*
* Drupal 7 phpass variant using SHA-512 and hashes cut at 258 bits.
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* These are 8 byte salted hashes with a loop count that defines the number
* of loops to compute. Drupal uses 258 bits of the hash, this is a multiple of
* 6 but not 8. I presume this is for getting unpadded base64. Anyway we store
* an extra byte but for now we will only compare 256 bits. I doubt that will
* pose any problems. Actually I'm not quite sure the last bits end up correct
* from the current version of get_binary().
*
* Based on [old thick] phpass-md5.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_drupal7;
#elif FMT_REGISTERS_H
john_register_one(&fmt_drupal7);
#else
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Drupal7"
#define FORMAT_NAME "$S$"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (x16385)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 47
#define CIPHERTEXT_LENGTH 55
#define DIGEST_SIZE (512/8)
#define BINARY_SIZE (258/8) // ((258+7)/8)
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Known-good Drupal 7 "$S$" hash/plaintext pairs, used by the self-test
   and benchmark machinery.  The list is NULL-terminated. */
static struct fmt_tests tests[] = {
	{"$S$CwkjgAKeSx2imSiN3SyBEg8e0sgE2QOx4a/VIfCHN0BZUNAWCr1X", "virtualabc"},
	{"$S$CFURCPa.k6FAEbJPgejaW4nijv7rYgGc4dUJtChQtV4KLJTPTC/u", "password"},
	{"$S$C6x2r.aW5Nkg7st6/u.IKWjTerHXscjPtu4spwhCVZlP89UKcbb/", "NEW_TEMP_PASSWORD"},
	{NULL}
};
/*
* NOTE, due to the 0x4000 iteration count, I am not wasting time pre-loading
* keys/salts. We will simply add SIMD code to the crypt_all. We could only
* gain < .1% worrying about all the extra stuff from set_key, get_key, the
* hashes, etc needed to split out SIMD. We just keep all input data in 'flat'
* format, switch to SIMD, do the 0x4000 loops, and put output back into 'flat'
* layout again. So we have no 'static' SIMD objects.
*/
/* Per-salt state set by set_salt(). */
static unsigned char *cursalt;  /* 8 salt bytes + iteration-count char at [8] */
static unsigned loopCnt;        /* iteration count: 1 << atoi64(salt[8]) */
/* Per-candidate state, allocated in init() for max_keys_per_crypt entries. */
static unsigned char (*EncKey)[PLAINTEXT_LENGTH + 1];  /* NUL-terminated keys */
static unsigned int *EncKeyLen;                        /* key lengths */
static char (*crypt_key)[DIGEST_SIZE];                 /* raw SHA-512 results */
/*
 * Format init: scale the key-batch sizes by the OpenMP thread count
 * (times OMP_SCALE for max) and allocate the per-candidate buffers.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* mem_calloc zero-fills, so all keys start empty. */
	EncKey = mem_calloc(self->params.max_keys_per_crypt,
	                    sizeof(*EncKey));
	EncKeyLen = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*EncKeyLen));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}
/* Release the buffers allocated in init() (reverse allocation order). */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(EncKeyLen);
	MEM_FREE(EncKey);
}
/*
 * Validate a candidate ciphertext: exact length, "$S$" tag, every
 * remaining character in the phpass base-64 alphabet, and a sane
 * iteration-count character (log2 count in [7, 31]).
 * Returns 1 when the hash is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	unsigned iter_log2;
	int pos;

	if (strlen(ciphertext) != CIPHERTEXT_LENGTH ||
	    strncmp(ciphertext, "$S$", 3) != 0)
		return 0;
	/* 0x7F marks characters outside the base-64 alphabet. */
	for (pos = 3; pos < CIPHERTEXT_LENGTH; pos++) {
		if (atoi64[ARCH_INDEX(ciphertext[pos])] == 0x7F)
			return 0;
	}
	/* The character right after "$S$" encodes log2 of the loop count. */
	iter_log2 = atoi64[ARCH_INDEX(ciphertext[3])];
	return (iter_log2 >= 7 && iter_log2 <= 31);
}
/*
 * Install the current salt.  Byte [8] of the salt blob (appended by
 * get_salt) holds the base-64 iteration-count character; decode it to the
 * actual loop count 2^n up front so crypt_all doesn't have to.
 */
static void set_salt(void *salt)
{
	loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
	cursalt = salt;
}
static void set_key(char *key, int index)
{
int len;
len = strlen(key);
EncKeyLen[index] = len;
memcpy(((char*)EncKey[index]), key, len + 1);
}
/* Return the stored (NUL-terminated) candidate key for slot `index`. */
static char *get_key(int index)
{
	return (char*)EncKey[index];
}
/*
 * Fast screen: return 1 if ANY of the `count` computed hashes matches the
 * target binary in its first ARCH_SIZE bytes (full comparison is left to
 * cmp_one), 0 otherwise.
 */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_key[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full comparison of one candidate against the target's BINARY_SIZE bytes. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}

/* No further verification needed beyond cmp_one; always a match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Compute the Drupal 7 phpass-SHA512 hash for every queued candidate:
 *   h = SHA512(salt . password); repeat loopCnt times: h = SHA512(h . password)
 * Candidates are processed MAX_KEYS_PER_CRYPT at a time, in parallel under
 * OpenMP; results land in crypt_key[].  Returns the candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT)
	{
#ifdef SIMD_COEF_64
		/* Flat keys are packed into interleaved SIMD lanes on the fly
		   (see the file-top NOTE): buffer is cache-line aligned by hand. */
		unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys;
		ARCH_WORD_64 *keys64;
		unsigned i, j, len, Lcount = loopCnt;

		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
		keys64 = (ARCH_WORD_64*)keys;
		memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
		/* First block: salt (8 bytes) . password, padded SHA-512 style
		   (0x80 terminator, bit length in the last 64-bit word). */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len = EncKeyLen[index+i];
			for (j = 0; j < 8; ++j)
				keys[GETPOS(j, i)] = cursalt[j];
			for (j = 0; j < len; ++j)
				keys[GETPOS(j+8, i)] = EncKey[index+i][j];
			keys[GETPOS(j+8, i)] = 0x80;
			keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+8) << 3;
		}
		SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		/* Loop blocks: previous 64-byte digest (left in place by
		   OUTPUT_AS_INP_FMT) . password, repadded for the new length. */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len = EncKeyLen[index+i];
			for (j = 0; j < len; ++j)
				keys[GETPOS(j+64, i)] = EncKey[index+i][j];
			keys[GETPOS(j+64, i)] = 0x80;
			keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+64) << 3;
		}
		while (--Lcount)
			SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		// Last one with FLAT_OUT
		SIMDSHA512body(keys, (ARCH_WORD_64*)crypt_key[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
		/* Scalar path: same algorithm with one candidate per iteration.
		   tmp keeps digest . password contiguous so each loop round is a
		   single SHA512 over `len` bytes. */
		SHA512_CTX ctx;
		unsigned char tmp[DIGEST_SIZE + PLAINTEXT_LENGTH];
		int len = EncKeyLen[index];
		unsigned Lcount = loopCnt - 1;

		SHA512_Init( &ctx );
		SHA512_Update( &ctx, cursalt, 8 );
		SHA512_Update( &ctx, EncKey[index], len );
		memcpy(&tmp[DIGEST_SIZE], (char *)EncKey[index], len);
		SHA512_Final( tmp, &ctx);
		len += DIGEST_SIZE;
		do {
			SHA512_Init( &ctx );
			SHA512_Update( &ctx, tmp, len);
			SHA512_Final( tmp, &ctx);
		} while (--Lcount);
		SHA512_Init( &ctx );
		SHA512_Update( &ctx, tmp, len);
		SHA512_Final( (unsigned char *) crypt_key[index], &ctx);
#endif
	}
	return count;
}
/*
 * Decode the hash portion of the ciphertext (after "$S$", the count char
 * and the 8 salt chars) from phpass little-endian base-64 into raw bytes.
 * Each group of 4 chars yields 3 bytes: char N contributes its 6 bits
 * starting at bit 6*N of the 24-bit group.  10 full groups (30 bytes) plus
 * a final partial group cover Drupal's 258 stored bits.
 * Returns a pointer to static storage (the usual JtR get_binary contract).
 * NOTE(review): per the file header, the trailing partial-group bits may
 * not be exactly right; only the first 256 bits are compared anyway.
 */
static void * get_binary(char *ciphertext)
{
	int i;
	unsigned sixbits;
	static union {
		unsigned char u8[BINARY_SIZE + 1];
		ARCH_WORD_32 u32;
	} out;
	int bidx=0;
	char *pos;

	/* Skip "$S$" (3), the count char (1) and the salt (8). */
	pos = &ciphertext[3 + 1 + 8];
	for (i = 0; i < 10; ++i) {
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		out.u8[bidx] = sixbits;                  /* bits 0-5 of byte 0 */
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		out.u8[bidx++] |= (sixbits<<6);          /* low 2 bits -> top of byte 0 */
		sixbits >>= 2;
		out.u8[bidx] = sixbits;                  /* high 4 bits -> low nibble of byte 1 */
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		out.u8[bidx++] |= (sixbits<<4);          /* low 4 bits -> high nibble of byte 1 */
		sixbits >>= 4;
		out.u8[bidx] = sixbits;                  /* high 2 bits -> low bits of byte 2 */
		sixbits = atoi64[ARCH_INDEX(*pos++)];
		out.u8[bidx++] |= (sixbits<<2);          /* 6 bits -> bits 2-7 of byte 2 */
	}
	/* Final partial group: 3 chars -> last 2+ bytes. */
	sixbits = atoi64[ARCH_INDEX(*pos++)];
	out.u8[bidx] = sixbits;
	sixbits = atoi64[ARCH_INDEX(*pos++)];
	out.u8[bidx++] |= (sixbits<<6);
	sixbits >>= 2;
	out.u8[bidx] = sixbits;
	sixbits = atoi64[ARCH_INDEX(*pos++)];
	out.u8[bidx++] |= (sixbits<<4);
	return out.u8;
}
/*
 * Extract the salt from a ciphertext: the 8 raw salt characters, plus the
 * iteration-count character appended at index 8 so set_salt() can decode
 * the loop count.  Returns a pointer to static storage.
 */
static void * get_salt(char *ciphertext)
{
	static union {
		unsigned char u8[SALT_SIZE + 1];
		ARCH_WORD_32 u32;
	} salt;
	// store off the 'real' 8 bytes of salt
	memcpy(salt.u8, &ciphertext[4], 8);
	// append the 1 byte of loop count information.
	salt.u8[8] = ciphertext[3];
	return salt.u8;
}
/* Partial-hash accessors: mask the first 32 bits of a computed hash down
   to each of JtR's standard hash-table sizes. */
static int get_hash_0(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_0; }
static int get_hash_1(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_1; }
static int get_hash_2(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_2; }
static int get_hash_3(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_3; }
static int get_hash_4(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_4; }
static int get_hash_5(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_5; }
static int get_hash_6(int index) { return *((ARCH_WORD_32 *)&crypt_key[index]) & PH_MASK_6; }
/* Hash a salt into JtR's 10-bit salt table (first 32 bits, masked). */
static int salt_hash(void *salt)
{
	return *((ARCH_WORD_32 *)salt) & 0x3FF;
}

/* Tunable-cost report: iteration count is 2^(base-64 of salt byte [8]). */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int) 1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]);
}
/*
 * Format descriptor registered with the JtR core: static parameters first,
 * then the method table wiring up the functions defined above.  Field order
 * follows struct fmt_main / fmt_params / fmt_methods in formats.h.
 */
struct fmt_main fmt_drupal7 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		// true salt is SALT_SIZE but we add the loop count
		SALT_SIZE + 1,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* Reported tunable cost (see iteration_count below). */
			"iteration count",
		},
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
interpolation_pl.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise-linear interpolation of one block: prolongs coarse data (level_c,
// id_c) to a 2x-refined fine block (level_f, id_f), accumulating into the
// destination as write = prescale_f*write + interpolant.  The block may read
// from / write to either a box in a level or a raw MPI buffer (box < 0).
static inline void InterpolateBlock_PL(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
  int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
  int write_dim_j = block->dim.j<<1;
  int write_dim_k = block->dim.k<<1;

  int read_i = block->read.i;
  int read_j = block->read.j;
  int read_k = block->read.k;
  int read_jStride = block->read.jStride;
  int read_kStride = block->read.kStride;

  int write_i = block->write.i;
  int write_j = block->write.j;
  int write_k = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  double * __restrict__ read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;
  double * __restrict__ valid;  // NOTE(review): only assigned when reading from a box; unused otherwise
  if(block->read.box >=0){
    // reading from a box: offset the vector pointers past the ghost zone
    read = level_c->my_boxes[ block->read.box].vectors[ id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
    valid= level_c->my_boxes[ block->read.box].vectors[VECTOR_VALID] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
    read_jStride = level_c->my_boxes[block->read.box ].jStride;
    read_kStride = level_c->my_boxes[block->read.box ].kStride;
  }
  if(block->write.box>=0){
    // writing into a box: same ghost-zone offset on the fine level
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
  }

  int i,j,k;
  for(k=0;k<write_dim_k;k++){
  for(j=0;j<write_dim_j;j++){
  for(i=0;i<write_dim_i;i++){
    // each fine point (i,j,k) maps to coarse point (i>>1,j>>1,k>>1)
    int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
    int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    // | o | o |
    // +---+---+---+---+
    // | | x | x | |
    //
    // CAREFUL !!! you must guarantee you zero'd the MPI buffers(write[]) and destination boxes at some point to avoid 0.0*NaN or 0.0*inf
    // piecewise linear interpolation... NOTE, BC's must have been previously applied
    int delta_i= -1;if(i&0x1)delta_i= 1; // i.e. even points look backwards while odd points look forward
    int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
    int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
    // trilinear weights 0.75/0.25 per axis: 0.75^3, 0.75^2*0.25, 0.75*0.25^2, 0.25^3
    write[write_ijk] = prescale_f*write[write_ijk] +
                       0.421875*read[read_ijk ] +
                       0.140625*read[read_ijk +delta_k] +
                       0.140625*read[read_ijk +delta_j ] +
                       0.046875*read[read_ijk +delta_j+delta_k] +
                       0.140625*read[read_ijk+delta_i ] +
                       0.046875*read[read_ijk+delta_i +delta_k] +
                       0.046875*read[read_ijk+delta_i+delta_j ] +
                       0.015625*read[read_ijk+delta_i+delta_j+delta_k];
  }}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise linear interpolation
// Perform an inter-level piecewise-linear interpolation (coarse -> fine):
// exchange/apply BCs on the coarse level, prepost receives, pack and send
// MPI buffers, overlap local interpolation with communication, then unpack.
// Per-phase cycle counts are accumulated into level_f->cycles.
void interpolation_pl(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  exchange_boundary(level_c,id_c,0);   // fill coarse ghost zones first
  apply_BCs_linear(level_c,id_c);      // interpolation stencil requires valid BCs

  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  int buffer=0;
  int sendBox,recvBox,n;  // NOTE(review): sendBox/recvBox are unused here — confirm before removing

  #ifdef USE_MPI
  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_f->interpolation.num_recvs;n++){
    MPI_Irecv(level_f->interpolation.recv_buffers[n],
              level_f->interpolation.recv_sizes[n],
              MPI_DOUBLE,
              level_f->interpolation.recv_ranks[n],
              0, // only one message should be received from each neighboring process
              MPI_COMM_WORLD,
              &level_f->interpolation.requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers...
  _timeStart = CycleTime();
  #pragma omp parallel for private(buffer) if(level_c->interpolation.num_blocks[0]>1) schedule(static,1)
  for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){InterpolateBlock_PL(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);} // !!! prescale==0 because you don't want to increment the MPI buffer
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_c->interpolation.num_sends;n++){
    MPI_Isend(level_c->interpolation.send_buffers[n],
              level_c->interpolation.send_sizes[n],
              MPI_DOUBLE,
              level_c->interpolation.send_ranks[n],
              0, // only one message should be sent to each neighboring process
              MPI_COMM_WORLD,
              &level_c->interpolation.requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
  #endif

  // perform local interpolation... try and hide within Isend latency...
  _timeStart = CycleTime();
  #pragma omp parallel for private(buffer) if(level_c->interpolation.num_blocks[1]>1) schedule(static,1)
  for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){InterpolateBlock_PL(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);}
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  // sends were posted on the coarse level's requests, receives on the fine level's
  if(level_c->interpolation.num_sends)MPI_Waitall(level_c->interpolation.num_sends,level_c->interpolation.requests,level_c->interpolation.status);
  if(level_f->interpolation.num_recvs)MPI_Waitall(level_f->interpolation.num_recvs,level_f->interpolation.requests,level_f->interpolation.status);
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers
  _timeStart = CycleTime();
  #pragma omp parallel for private(buffer) if(level_f->interpolation.num_blocks[2]>1) schedule(static,1)
  for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);}
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
  #endif

  level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Per-pixel scratch record: one double accumulator per pixel channel,
   used as thread-local workspace by EvaluateImages(). */
typedef struct _PixelChannels
{
  double
    channel[CompositePixelChannel];
} PixelChannels;
static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels)
{
  /*
    Release the per-thread scratch rows allocated by AcquirePixelThreadSet()
    and then the row-pointer table itself; always returns NULL for the
    caller to assign back.
  */
  register ssize_t
    n;

  assert(pixels != (PixelChannels **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixels[n] == (PixelChannels *) NULL)
      continue;
    pixels[n]=(PixelChannels *) RelinquishMagickMemory(pixels[n]);
  }
  pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static PixelChannels **AcquirePixelThreadSet(const Image *image)
{
PixelChannels
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(PixelChannels **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(image->columns,
sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(pixels));
for (j=0; j < (ssize_t) image->columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
static inline double EvaluateMax(const double x,const double y)
{
  /* Return the larger operand; y wins ties (and when x compares unordered). */
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator for PixelChannels records: orders by the signed sum
    of per-channel differences between the two pixels.
  */
  register const PixelChannels
    *p,
    *q;

  double
    sum;

  register ssize_t
    i;

  p=(const PixelChannels *) x;
  q=(const PixelChannels *) y;
  sum=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    sum+=p->channel[i]-(double) q->channel[i];
  if (sum < 0)
    return(-1);
  if (sum > 0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply the evaluate operator 'op' to a single quantum 'pixel' with operand
  'value' and return the raw (unclamped) double result; the caller clamps.
  Undefined/unknown operators yield 0.  'random_info' feeds the noise
  operators.  NOTE(review): Mean/Median/Sum only add here — the caller is
  expected to complete those reductions (divide, sort, etc.).
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result. It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* bitwise ops round the operand to nearest integer via +0.5 */
      result=(double) ((size_t) pixel & (size_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor degrades to identity rather than dividing by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      result=(double) ((size_t) pixel << (size_t) (value+0.5));
      break;
    }
    case LogEvaluateOperator:
    {
      /* guard: log of near-zero intensity would blow up; result stays 0 */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulate only; the caller divides by the sample count */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* accumulate only; the caller sorts and picks the middle element */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((size_t) pixel | (size_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double)
        value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) ((size_t) pixel >> (size_t) (value+0.5));
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulate the square; the caller applies sqrt(mean) */
      result=(double) (pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((size_t) pixel ^ (size_t) (value+0.5));
      break;
    }
  }
  return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  /*
    Choose a canvas template for EvaluateImages(): clone the list member
    with the most channels, sized to the maximum width and height found
    anywhere in the list.
  */
  const Image
    *canvas,
    *next;

  size_t
    height,
    width;

  canvas=images;
  width=images->columns;
  height=images->rows;
  for (next=images; next != (Image *) NULL; next=next->next)
  {
    if (next->number_channels > canvas->number_channels)
      canvas=next;
    if (next->columns > width)
      width=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  return(CloneImage(canvas,width,height,MagickTrue,exception));
}
/*
  Apply evaluate operator 'op' across an image sequence, producing a single
  result image.  The median operator gets its own path (per-pixel sort over
  the list); all other operators accumulate per-column then finish the
  reduction (mean, multiply, RMS).  Returns NULL (and destroys the partial
  result) on failure.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median path: for each output pixel gather the corresponding pixel
        from every image in the list, sort by IntensityCompare, and take
        the middle element.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      // parallelize only when the RNG is unseeded (key == ~0UL) so seeded
      // noise remains reproducible — NOTE(review): confirm intent
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j,
            k;

          // rows of the scratch buffer are indexed by image position here
          for (j=0; j < (ssize_t) number_images; j++)
            for (k=0; k < MaxPixelChannels; k++)
              evaluate_pixel[j].channel[k]=0.0;
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            register const Quantum
              *p;

            register ssize_t
              i;

            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const Quantum *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(image,channel,p),op,
                evaluate_pixel[j].channel[i]);
            }
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          // NOTE(review): j equals number_images here (fewer if a pixel
          // fetch failed and broke out early), so j/2 is the median index
          for (k=0; k < (ssize_t) GetPixelChannels(image); k++)
            q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_EvaluateImages)
#endif
            proceed=SetImageProgress(images,EvaluateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        General path: accumulate every image into per-column scratch rows
        (the first image is always Add so the accumulator starts from its
        values), then finish the reduction per operator and clamp.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register ssize_t
          i,
          x;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        // rows of the scratch buffer are indexed by column here
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          register const Quantum
            *p;

          image_view=AcquireVirtualCacheView(next,exception);
          p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
            exception);
          if (p == (const Quantum *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;

            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(image,channel,p),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p+=GetPixelChannels(next);
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        // finish the reduction for operators that need post-processing
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;

                // rescale once per multiplication to keep values in range
                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        // clamp the accumulated values into the output pixels
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_EvaluateImages)
#endif
            proceed=SetImageProgress(images,EvaluateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  Apply evaluate operator 'op' with operand 'value' to every updatable
  channel of every pixel of 'image', in place.  Returns MagickTrue on
  success; errors are reported through 'exception'.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  // parallelize only when the RNG is unseeded (key == ~0UL) so seeded
  // noise operators remain reproducible — NOTE(review): confirm intent
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        // with a single image, Mean reduces to the average of pixel and value
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EvaluateImage)
#endif
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Apply 'function' to a single quantum 'pixel' using 'parameters'
  (number_parameters entries, missing values get defaults) and return the
  clamped result.  'exception' is accepted for signature symmetry but unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  register ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3).  Evaluated via Horner's scheme on the normalized
        pixel, then rescaled to the quantum range.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      if ( result <= -1.0 )
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  Apply 'function' with 'parameters' to every updatable channel of every
  pixel of 'image', in place (delegating to the OpenCL path when available).
  Returns MagickTrue on success; errors are reported through 'exception'.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  // try the accelerated (OpenCL) implementation first
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
        exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FunctionImage)
#endif
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Delegate to GetImageStatistics() and report the composite-channel entropy.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  /*
    Query the floating-point channel range, then round outward to integral
    quantum values: minima rounds half-down, maxima rounds half-up.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Fetch full per-channel statistics and surface the composite-channel
    kurtosis and skewness to the caller.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  /*
    Surface the composite-channel mean and standard deviation computed by
    GetImageStatistics().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8

  CacheView
    *image_view;

  ChannelMoments
    *channel_moments;

  /*
    Raw/central image moments M[pq].  Each array holds one slot per pixel
    channel plus one extra aggregate slot at index MaxPixelChannels for the
    composite (all-channel) result.
  */
  double
    M00[MaxPixelChannels+1],
    M01[MaxPixelChannels+1],
    M02[MaxPixelChannels+1],
    M03[MaxPixelChannels+1],
    M10[MaxPixelChannels+1],
    M11[MaxPixelChannels+1],
    M12[MaxPixelChannels+1],
    M20[MaxPixelChannels+1],
    M21[MaxPixelChannels+1],
    M22[MaxPixelChannels+1],
    M30[MaxPixelChannels+1];

  PointInfo
    centroid[MaxPixelChannels+1];

  ssize_t
    channel,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,(MaxPixelChannels+1)*
    sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    Pass 1: accumulate the raw moments M00, M10, M01 needed for the centroid.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* Inner `channel` (PixelChannel) shadows the outer ssize_t index;
           it doubles as the per-channel slot into the M[pq] arrays. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M00[channel]+=QuantumScale*p[i];
        M00[MaxPixelChannels]+=QuantumScale*p[i];
        M10[channel]+=x*QuantumScale*p[i];
        M10[MaxPixelChannels]+=x*QuantumScale*p[i];
        M01[channel]+=y*QuantumScale*p[i];
        M01[MaxPixelChannels]+=y*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute center of mass (centroid).  A (near) zero mass falls back to
      the geometric image center to avoid dividing by zero.
    */
    if (M00[channel] < MagickEpsilon)
      {
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  /*
    Pass 2: accumulate the central moments about each channel's centroid.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  /*
    The aggregate slot summed every channel; average it so it represents the
    mean channel.
  */
  M00[MaxPixelChannels]/=GetImageChannels(image);
  M01[MaxPixelChannels]/=GetImageChannels(image);
  M02[MaxPixelChannels]/=GetImageChannels(image);
  M03[MaxPixelChannels]/=GetImageChannels(image);
  M10[MaxPixelChannels]/=GetImageChannels(image);
  M11[MaxPixelChannels]/=GetImageChannels(image);
  M12[MaxPixelChannels]/=GetImageChannels(image);
  M20[MaxPixelChannels]/=GetImageChannels(image);
  M21[MaxPixelChannels]/=GetImageChannels(image);
  M22[MaxPixelChannels]/=GetImageChannels(image);
  M30[MaxPixelChannels]/=GetImageChannels(image);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
      M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
    /*
      atan() only resolves the angle to a half-plane; adjust by quadrant
      using the signs of M11 and (M20-M02).
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
          channel_moments[channel].ellipse_angle+=0.0;
        else
          if ((M20[channel]-M02[channel]) < 0.0)
            channel_moments[channel].ellipse_angle+=90.0;
          else
            channel_moments[channel].ellipse_angle+=0.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=180.0;
        }
      else
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=0.0;
        }
    /* NOTE(review): eccentricity uses the axis ratio b/a, not (b/a)^2 as in
       the textbook sqrt(1-(b/a)^2) formula -- confirm this is intentional. */
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y/
      (channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Normalize image moments: eta[pq] = mu[pq] / M00^(1+(p+q)/2).
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  image_view=DestroyCacheView(image_view);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
    channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
      M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    A pixel-read failure breaks the row loop early (y < rows); free the
    result and return NULL (RelinquishMagickMemory() yields NULL).
    NOTE(review): y is the index of the *second* row loop, so an early break
    confined to the first pass would not be detected here -- confirm.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;

  char
    *colorspaces,
    *q;

  const char
    *artifact;

  MagickBooleanType
    status;

  register char
    *p;

  register ssize_t
    i;

  /*
    Compute the perceptual hash: for each requested colorspace, blur a copy
    of the image, transform it, and record -log10 of its Hu image moments.
  */
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  /*
    The "phash:colorspaces" artifact overrides the default colorspace list.
  */
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;

    Image
      *hash_image;

    size_t
      j;

    ssize_t
      channel,
      colorspace;

    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Fix: destroy the blurred copy before bailing out; the original
          code leaked hash_image on a failed colorspace transform.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    /*
      Store -log10(invariant) for each channel and moment; MagickLog10()
      clamps near-zero invariants so the hash stays finite.
    */
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    initialize,
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  /* `initialize` is MagickTrue until the first row publishes its extrema;
     it guards the shared *minima/*maxima inside the critical section. */
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;

    MagickBooleanType
      row_initialize;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /* Parallel loops cannot break; bail out of remaining rows instead. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Reduce each row privately first, so threads only contend on the
      shared extrema once per row.
    */
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (row_initialize != MagickFalse)
          {
            /* Seed the row extrema from the first updatable channel. */
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
    /*
      Merge this row's extrema into the shared result under a named critical
      section to avoid a data race.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area,
    *histogram,
    standard_deviation;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    depth;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Histogram layout: (MaxMap+1) bins, each bin holding one counter per
    pixel channel (interleaved by channel index).
  */
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  /* Seed minima/maxima with sentinels so the first pixel always wins. */
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Find the least bit depth that represents p[i] exactly: if the
              round-trip through the current depth is lossy, bump the depth
              and retry this same channel (i-- re-runs the loop body).
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                i--;
                continue;
              }
          }
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        /* Accumulate raw power sums; normalized to moments further below. */
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics: divide the power sums by the sample area
      (PerceptibleReciprocal() guards against a zero area), then derive the
      sample standard deviation with Bessel's correction (area/(area-1)).
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;

    register ssize_t
      j;

    /*
      Compute pixel entropy.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    /* Count occupied histogram bins; log10(number_bins) normalizes the
       entropy to [0,1]. */
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;

      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      /* Composite entropy averages over all pixel channels. */
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics from the normalized power sums;
      the trailing -3.0 yields excess kurtosis (0 for a normal distribution).
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /*
    Recompute the composite mean/deviation/entropy as the average of the
    per-channel slots (the running composite sums above counted every
    channel sample).
  */
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channel_statistics[CompositePixelChannel].mean/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].standard_deviation/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].entropy/=(double)
    GetImageChannels(image);
  /* A pixel-read failure broke the row loop early; free the result and
     return NULL (RelinquishMagickMemory() yields NULL). */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict polynomial_pixels;

  size_t
    number_images;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Result canvas sized from the sequence; DirectClass so every pixel can be
    written independently.
  */
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /*
    One accumulator row per worker thread (MaxPixelChannels sums per pixel).
  */
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels: for each row, accumulate coefficient*pow(value,
    degree) over the image sequence, then clamp back to quantum range.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register ssize_t
      i,
      x;

    register PixelChannels
      *polynomial_pixel;

    register Quantum
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Zero this thread's accumulators for the row.
    */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;

      /*
        Images beyond the supplied coefficient/degree pairs contribute
        nothing.  NOTE(review): this 'continue' also skips the
        GetNextImageInList() advance below; harmless only because every later
        j fails this test too.
      */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;

          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /*
            terms[] holds a coefficient/degree pair for each image j.
            NOTE(review): p walks next's pixel buffer but the channel offset
            comes from image's channel map — confirm the maps agree for every
            image in the sequence.
          */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /*
      Scale the accumulated sums back to quantum range and store them.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PolynomialImages)
#endif
        proceed=SetImageProgress(images,PolynomialImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One skip-list node per possible 16-bit intensity value (plus a sentinel at
  index 65536 — see AcquirePixelList/ResetPixelList).
*/
typedef struct _SkipNode
{
  size_t
    next[9],    /* forward links, one per skip-list level (max 9 levels) */
    count,      /* samples with this intensity in the current window */
    signature;  /* node is live when this matches PixelList.signature */
} SkipNode;
/*
  Skip-list keyed by 16-bit intensity; 'level' is the highest link level
  currently in use.
*/
typedef struct _SkipList
{
  ssize_t
    level;
  SkipNode
    *nodes;  /* 65537 nodes: one per 16-bit value plus the sentinel */
} SkipList;
/*
  Histogram of one neighborhood window, stored as a sorted skip-list so
  order statistics (min/max/median/mode/...) are cheap to extract.
*/
typedef struct _PixelList
{
  size_t
    length,  /* samples per window: width*height (set in AcquirePixelList) */
    seed;    /* PRNG state used to pick randomized node levels */
  SkipList
    skip_list;
  size_t
    signature;  /* generation stamp; bumped by ResetPixelList */
} PixelList;
/*
  Release a pixel list: free the aligned skip-list node storage first (if
  present), then the list structure itself.  Always returns NULL.
*/
static PixelList *DestroyPixelList(PixelList *list)
{
  if (list == (PixelList *) NULL)
    return((PixelList *) NULL);
  if (list->skip_list.nodes != (SkipNode *) NULL)
    list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
      list->skip_list.nodes);
  return((PixelList *) RelinquishMagickMemory(list));
}
/*
  Destroy each per-thread pixel list, then the array that held them.
  Always returns NULL.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    n;

  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
/*
  Link a new color key (count 1) into the sorted skip-list.  Caller
  (InsertPixelList) guarantees the node is not already live.
*/
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  register SkipList
    *p;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list: descend from the sentinel (index
    65536), recording the rightmost predecessor at each level.
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (LCG; loop continues while
    both probe bits are set, giving a geometric level distribution).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  /* clamp: never above 8, and never grow the list by more than 2 levels */
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list at every level up to 'level'.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  Return (via *pixel) the largest color in the window: walk the level-0
  chain from the sentinel until every inserted sample is accounted for,
  keeping the largest key seen.
*/
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    maximum,
    node;

  ssize_t
    tally;

  list=(&pixel_list->skip_list);
  node=65536L;
  tally=0;
  maximum=list->nodes[node].next[0];
  do
  {
    node=list->nodes[node].next[0];
    if (node > maximum)
      maximum=node;
    tally+=list->nodes[node].count;
  } while (tally < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) maximum);
}
/*
  Return (via *pixel) the mean color of the window: a count-weighted average
  of every key on the level-0 chain.
*/
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  double
    total;

  register SkipList
    *list;

  size_t
    node;

  ssize_t
    tally;

  list=(&pixel_list->skip_list);
  node=65536L;
  tally=0;
  total=0.0;
  do
  {
    node=list->nodes[node].next[0];
    total+=(double) list->nodes[node].count*node;
    tally+=list->nodes[node].count;
  } while (tally < (ssize_t) pixel_list->length);
  total/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) total);
}
/*
  Return (via *pixel) the median color: advance along the level-0 chain
  until the running sample count passes the midpoint.
*/
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    node;

  ssize_t
    tally;

  list=(&pixel_list->skip_list);
  node=65536L;
  tally=0;
  do
  {
    node=list->nodes[node].next[0];
    tally+=list->nodes[node].count;
  } while (tally <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) node);
}
/*
  Return (via *pixel) the smallest color in the window, walking the sorted
  level-0 chain and tracking the smallest key seen.
*/
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    minimum,
    node;

  ssize_t
    tally;

  list=(&pixel_list->skip_list);
  tally=0;
  node=65536UL;
  minimum=list->nodes[node].next[0];
  do
  {
    node=list->nodes[node].next[0];
    if (node < minimum)
      minimum=node;
    tally+=list->nodes[node].count;
  } while (tally < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) minimum);
}
/*
  Return (via *pixel) the mode — the color with the highest sample count in
  the window.  The sentinel node seeds the search.
*/
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    best,
    best_count,
    node;

  ssize_t
    tally;

  list=(&pixel_list->skip_list);
  node=65536L;
  best=node;
  best_count=list->nodes[best].count;
  tally=0;
  do
  {
    node=list->nodes[node].next[0];
    if (list->nodes[node].count > best_count)
      {
        best=node;
        best_count=list->nodes[best].count;
      }
    tally+=list->nodes[node].count;
  } while (tally < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) best);
}
/*
  Return (via *pixel) the "non-peak" value: the median, unless the median is
  the first or last distinct color in the sorted list, in which case its
  interior neighbour is used instead.
*/
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *p;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  /*
    Walk the level-0 chain to the median, tracking the nodes immediately
    before (previous) and after (next) the median color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  /* 65536 is the sentinel: step inward when the median sits at either end */
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
/*
  Return (via *pixel) the root-mean-square of the window's colors, computed
  as sqrt(sum(count*color^2)/length) over the sorted level-0 chain.
*/
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the root mean square value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    /*
      Accumulate in double precision: the former size_t product
      count*color*color overflows 32-bit size_t (color can reach 65535, so
      color^2 alone is ~4.3e9).
    */
    sum+=(double) p->nodes[color].count*(double) color*(double) color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
/*
  Return (via *pixel) the standard deviation of the window's colors:
  sqrt(E[X^2]-E[X]^2) over the sorted level-0 chain.
*/
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum,
    sum_squared;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  /*
    Find the standard-deviation value for each of the color.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    /*
      Multiply by the node count instead of looping count times (the former
      inner loop made this O(count) per node); keep the arithmetic in double
      so large counts cannot overflow.
    */
    sum_squared+=(double) p->nodes[color].count*(double) color*(double) color;
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
/*
  Add one sample to the window histogram: bump the count when this 16-bit
  intensity is already live in the skip-list for the current window
  (signatures match); otherwise link a fresh node.
*/
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  unsigned short
    index;

  index=ScaleQuantumToShort(pixel);
  if (pixel_list->skip_list.nodes[index].signature == pixel_list->signature)
    {
      pixel_list->skip_list.nodes[index].count++;
      return;
    }
  AddNodePixelList(pixel_list,index);
}
/*
  Reset the skip-list for a new window: point every level of the sentinel
  back at itself and bump the generation signature so nodes left over from
  the previous window are treated as dead.
*/
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register SkipList
    *list;

  register SkipNode
    *sentinel;

  list=(&pixel_list->skip_list);
  sentinel=list->nodes+65536UL;
  list->level=0;
  for (level=0; level < 9; level++)
    sentinel->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /*
    One skip-list histogram per worker thread; window dimensions are clamped
    to a minimum of 1 throughout.
  */
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
    'center' is the Quantum offset of the window's center pixel inside the
    padded virtual band fetched below (row stride = columns+width pixels).
  */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a band tall/wide enough for the whole neighborhood, shifted so
      the window can be centered on every (x,y) of this row.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        Quantum
          pixel;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-only channels and masked pixels pass the center pixel through
          unchanged.  NOTE(review): the write mask is sampled at p (window
          origin), not p+center — confirm that is intended.
        */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Build the neighborhood histogram for this channel, then reduce it
          with the requested statistic.
        */
        pixels=p;
        ResetPixelList(pixel_list[id]);
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            InsertPixelList(pixels[i],pixel_list[id]);
            pixels+=GetPixelChannels(image);
          }
          /* advance to the next padded row: width+columns pixels per stride */
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case GradientStatistic:
          {
            double
              maximum,
              minimum;

            GetMinimumPixelList(pixel_list[id],&pixel);
            minimum=(double) pixel;
            GetMaximumPixelList(pixel_list[id],&pixel);
            maximum=(double) pixel;
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            GetMaximumPixelList(pixel_list[id],&pixel);
            break;
          }
          case MeanStatistic:
          {
            GetMeanPixelList(pixel_list[id],&pixel);
            break;
          }
          case MedianStatistic:
          default:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            GetMinimumPixelList(pixel_list[id],&pixel);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
            GetRootMeanSquarePixelList(pixel_list[id],&pixel);
            break;
          }
          case StandardDeviationStatistic:
          {
            GetStandardDeviationPixelList(pixel_list[id],&pixel);
            break;
          }
        }
        SetPixelChannel(statistic_image,channel,pixel,q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(statistic_image);
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
boundary_conditions_and_contact_utilities.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: Nelson Lafontaine $
// Date: $Date: 2007-03-06 10:30:34 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_BOUNDARY_CONDITIONS_AND_CONTACT_UTILITIES_INCLUDED )
#define KRATOS_BOUNDARY_CONDITIONS_AND_CONTACT_UTILITIES_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <algorithm>
#include <set>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "includes/mesh.h"
#include "geometries/geometry.h"
#include "geometries/point_2d.h"
#include "geometries/point_3d.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/tetrahedra_3d_4.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/bounding_box.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
#include "spatial_containers/bins_static_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "utilities/geometry_utilities.h"
#include "utilities/timer.h"
// #include "utilities/timer_CLabra.h"
#include "custom_conditions/slave_contact_point_2d.h"
#include "custom_conditions/slave_contact_point_3d.h"
#include "custom_conditions/master_contact_point_2d.h"
#include "custom_conditions/master_contact_face_2d.h"
#include "custom_conditions/master_contact_face_3D.h"
#include "custom_conditions/point_segment_contact_link.h"
#include "custom_conditions/point_point_contact_link.h"
#include "custom_conditions/contact_link_3D_explicit.h"
#include "geometries/plane.h"
#include "custom_utilities/segment_2d.h"
#include "custom_utilities/intersect_triangles_cases.h"
#include "processes/find_nodal_neighbours_process.h"
#include "processes/find_elements_neighbours_process.h"
#include "processes/find_conditions_neighbours_process.h"
#include "structural_application.h"
namespace Kratos
{
class BoundaryConditionsAndContactUtilities
{
public:
#define EPSILON 1.0e-10
#define NEPSILON -1.0e-10
#define BEPSILON 1.0e+15
static const int IT_POINT = 0;
static const int IT_SEGMENT = 1;
static const int IT_EMPTY = 2;
enum Exist_Node {no_nodes = 0, yes_nodes};
enum Near_Node {no_near = 0, yes_near};
enum Object {is_node = 0, is_object};
KRATOS_CLASS_POINTER_DEFINITION(BoundaryConditionsAndContactUtilities);
/// Utilities
typedef IntersectionSegment2DToSegment2D IntersectionSegments;
/// Elements
typedef ModelPart::ElementsContainerType ElementsArrayType;
/*
typedef ModelPart::ElementsContainerType::ContainerType ContainerType;
typedef ContainerType::value_type PointerType;
typedef ContainerType::iterator IteratorType;
typedef std::vector<PointerType>::iterator PointerTypeIterator;
typedef ContactPair<PointerType> ContactPairType;
typedef std::vector<ContactPairType> ContainerContactPair;
typedef ContainerContactPair::iterator IteratorContainerContactPair;
typedef ContainerContactPair::value_type PointerContainerContactPair;
*/
/// Conditions General
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
typedef ConditionsContainerType::iterator ConditionsIteratorType;
typedef ConditionsContainerType::value_type ConditionsPointerType;
typedef ContactPair<ConditionsPointerType> ConditionsContactPairType;
typedef std::vector<ConditionsPointerType>::iterator ConditionsPointerTypeIterator;
typedef std::vector<ConditionsContactPairType> ConditionsContainerContactPair;
typedef ConditionsContainerContactPair::iterator ConditionsIteratorContainerContactPair;
typedef ConditionsContainerContactPair::value_type ConditionsPointerContainerContactPair;
/// Condition Especificas
typedef SlaveContactPoint2D SlaveContactPointType;
typedef MasterContactPoint2D MasterContactPointType;
typedef MasterContactFace2D MasterContactFaceType;
///Nodes and properties
typedef Node<3> NodeType;
typedef Node<3>::Pointer NodePointerType;
typedef Geometry<NodeType> GeometryType;
typedef GeometryType::PointsArrayType PointsArrayType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef Element::GeometryType GeomType;
typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
typedef NodesContainerType::iterator NodesIteratorType;
typedef Properties PropertiesType;
static const std::size_t space_dim = 2;
typedef SpatialContainersConfigure<space_dim> Configure;
typedef Configure::PointType PointType;
typedef PointType::CoordinatesArrayType CoordinatesArrayType;
typedef Configure::ContainerType ContainerType;
typedef Configure::PointerType PointerType;
typedef Configure::IteratorType IteratorType;
typedef Configure::ResultContainerType ResultContainerType;
typedef Configure::ResultPointerType ResultPointerType;
typedef Configure::ResultIteratorType ResultIteratorType;
typedef Configure::ContactPairType ContactPairType;
typedef Configure::ContainerContactType ContainerContactType;
typedef Configure::IteratorContactType IteratorContactType;
typedef Configure::PointerContactType PointerContactType;
typedef Configure::PointerTypeIterator PointerTypeIterator;
typedef ContainerContactType ContainerContactPair;
typedef IteratorContactType IteratorContainerContactPair;
typedef PointerContactType PointerContainerContactPair;
/// Default constructor; members are left unset — intended for
/// pointer/container use only.
BoundaryConditionsAndContactUtilities() {}
/// @param model_part      model to operate on (held by reference)
/// @param dimension       spatial dimension of the problem (2 or 3)
/// @param penalty_factor  scales YOUNG_MODULUS to obtain the contact penalty
BoundaryConditionsAndContactUtilities(ModelPart& model_part, const unsigned int& dimension, const double& penalty_factor) : mr_model_part(model_part), mrdimension(dimension)
{
    mpenalty_factor = penalty_factor;
    // force the boundary contour to be computed on the first CreateBoundaries()
    mcompute_boundary_contour = true;
}
virtual ~BoundaryConditionsAndContactUtilities() {}
//************************************************************************************
//************************************************************************************
// Crea las conciones de contacto valid for lagrage multiplier y setea los elementos
// que son parte del contorno
/// Builds the contact boundary: clears previous contact conditions and, on
/// the first call (or after invalidation), computes the 2D/3D boundary
/// contour and the master surfaces used later by the contact search.
/// @param initial_conditions_size  forwarded to Clear() to decide how many
///        pre-existing conditions to preserve.
void CreateBoundaries(const unsigned int& initial_conditions_size)
{
    KRATOS_TRY
    Clear(initial_conditions_size);
    if(mcompute_boundary_contour)
    {
        std::cout<<"CREATING MASTER SURFACES"<< std::endl;
        if(mrdimension==2)
            CalculateBoundaryContour2D(mMasterConditionsArray);
        else
            CalculateBoundaryContour3D(mMasterConditionsArray);
        // contour is cached; recomputed only if this flag is reset elsewhere
        mcompute_boundary_contour = false;
    }
    return;
    KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
/// this function use the potencial contact force concept
/// Contact-force driver: bins the boundary elements into a static spatial
/// search structure, then — in parallel over thread partitions — queries each
/// boundary element for overlapping neighbours and applies penalty contact
/// (and, in 2D, damping) forces to the pairs that pass filtering.
/// NOTE(review): the guard below throws unless mrdimension == space_dim
/// (compile-time constant 2), which appears to make the 3D branch
/// unreachable as configured — confirm.
void ComputeContactForce()
{
    KRATOS_TRY
    // Configure is compiled for space_dim; refuse any other model dimension.
    if(mrdimension!= int(space_dim))
        KRATOS_THROW_ERROR(std::logic_error, "The Dimension of Configure and ModelPart not iquals " , "");
    IteratorType it_begin = mBoundaryElements.begin();
    IteratorType it_end = mBoundaryElements.end();
    //BinsObjectDynamic<Configure> rBinsObjectDynamic(it_begin, it_end);
    //BinsObjectDynamic<Configure>* rBins = &rBinsObjectDynamic;
    ///Bins estatico
    // Static bins over the boundary elements for neighbour queries.
    BinsObjectStatic<Configure> Bins(it_begin, it_end);
    BinsObjectStatic<Configure>* rBins = &Bins;
    const std::size_t MaxNumberOfResults = 1000;
    std::size_t NumberOfResults = 0;
    ResultIteratorType begin;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    // Split the boundary-element range into one contiguous chunk per thread.
    vector<unsigned int> partition;
    CreatePartition(number_of_threads, mBoundaryElements.size(), partition);
    //ContactPairType it_pair;
    ResultContainerType Result(MaxNumberOfResults);
    std::cout<<" PARTITION COMPUTING CONTACT CONDITIONS = " << number_of_threads << std::endl;
    if(mrdimension==2)
    {
        // Each thread owns its own Result buffer (firstprivate) and iterator.
        #pragma omp parallel for firstprivate(NumberOfResults,Result) private(begin)
        for(int k=0; k<number_of_threads; k++)
        {
            IteratorType it_begin = mBoundaryElements.begin() + partition[k];
            IteratorType it_end = mBoundaryElements.begin() + partition[k+1];
            for(IteratorType it =it_begin; it!=it_end; ++it)
            {
                begin = Result.begin();
                NumberOfResults = rBins->SearchObjects(*it, begin, MaxNumberOfResults);
                if(NumberOfResults!=0)
                {
                    for(ResultIteratorType rthis = Result.begin(); rthis!= Result.begin() + NumberOfResults; rthis++)
                    {
                        // Skip self-pairs, elements already handled as targets,
                        // and geometric false positives (FiltratePairContacts).
                        if((*rthis)->GetValue(IS_TARGET)== false && (*it)->Id()!=(*rthis)->Id() && FiltratePairContacts(*it, *rthis)==true)
                        {
                            ComputeContactForce2D(*it, *rthis);
                            ComputeContactDampingForces(*it, *rthis);
                        }
                    }
                    // NOTE(review): IS_TARGET is read and written concurrently
                    // by all threads without synchronization — confirm safety.
                    (*it)->GetValue(IS_TARGET) = true;
                }
            }
        }
    }
    else
    {
#ifdef _OPENMP
        double start_prod = omp_get_wtime();
#endif
        #pragma omp parallel for firstprivate(NumberOfResults,Result) private(begin)
        for(int k=0; k<number_of_threads; k++)
        {
            IteratorType it_begin = mBoundaryElements.begin() + partition[k];
            IteratorType it_end = mBoundaryElements.begin() + partition[k+1];
            for(IteratorType it =it_begin; it!=it_end; ++it)
            {
                //Result[k].clear();
                //Result[k].reserve(100);
                //rBinsObjectDynamic.SearchObjects(*it, Result[k]);
                begin = Result.begin();
                NumberOfResults = rBins->SearchObjects(*it, begin, MaxNumberOfResults);
                //if(Result[k].size()!=0){
                if(NumberOfResults!=0)
                    for(ResultIteratorType rthis = Result.begin(); rthis!= Result.begin() + NumberOfResults /*rthis!=Result[k].end()*/ ; rthis++)
                    {
                        if((*rthis)->GetValue(IS_TARGET)== false && (*it)->Id()!=(*rthis)->Id() && FiltratePairContacts(*it, *rthis)==true)
                        {
                            ComputeContactForce3D(*it, *rthis);
                        }
                    }
                (*it)->GetValue(IS_TARGET) = true;
            }
        }
#ifdef _OPENMP
        double stop_prod = omp_get_wtime();
        std::cout <<" Time Calculating Forces Contact = " << stop_prod - start_prod << std::endl;
#endif
    }
    std::cout<<" FINISHING COMPUTE CONTACT CONDITIONS " << std::endl;
    KRATOS_CATCH("")
}
/// Applies viscous contact damping forces between a target/contactor element
/// pair, proportional to the relative velocity projected on the line joining
/// the element centers (critical damping of the equivalent two-mass spring).
/// @param Target     element flagged as contact target
/// @param Contactor  neighbouring element in contact with Target
void ComputeContactDampingForces(const PointerType& Target, const PointerType& Contactor)
{
    KRATOS_TRY
    typedef Element::GeometryType::Pointer GeometryPointer;
    // Damping ratio: the larger of the two materials governs.
    double dampT = (Target->GetProperties()[DAMPING_RATIO]);
    double dampC = (Contactor->GetProperties()[DAMPING_RATIO]);
    double damp = std::max(dampT, dampC);
    const GeometryPointer& GTarget = (Target)->pGetGeometry();
    const GeometryPointer& GContactor = (Contactor)->pGetGeometry();
    // Penalty stiffness: penalty factor times Young's modulus; softer side wins.
    double pen_vec_tar = mpenalty_factor * (Target->GetProperties()[YOUNG_MODULUS]);
    double pen_vec_con = mpenalty_factor * (Contactor->GetProperties()[YOUNG_MODULUS]);
    double pen = std::min(pen_vec_tar, pen_vec_con);
    double massC = 0.00;
    double massT = 0.00;
    array_1d<double ,3> vel_T = ZeroVector(3);
    array_1d<double ,3> vel_C = ZeroVector(3);
    array_1d<double ,3> vel = ZeroVector(3);
    // Sum nodal masses and velocities over each geometry.
    // NOTE(review): vel_T/vel_C are nodal sums, not averages — confirm the
    // intended scale of the relative velocity.
    for(unsigned int i = 0; i<(*GTarget).size(); i++)
    {
        massT+= (*GTarget)(i)->FastGetSolutionStepValue(NODAL_MASS);
        noalias(vel_T) += (*GTarget)(i)->FastGetSolutionStepValue(VELOCITY);
    }
    for(unsigned int i = 0; i<(*GContactor).size(); i++)
    {
        massC+= (*GContactor)(i)->FastGetSolutionStepValue(NODAL_MASS);
        noalias(vel_C) += (*GContactor)(i)->FastGetSolutionStepValue(VELOCITY);
    }
    // Critical damping coefficient of the reduced two-mass system.
    double fact = (massT * massC * pen)/(massT + massC);
    double Ccr = 2.00 * std::sqrt(fact);
    // Contact normals along the center-to-center direction.
    array_1d<double ,3> normal_T = ZeroVector(3);
    array_1d<double ,3> normal_C = ZeroVector(3);
    array_1d<double ,3> Center_T = (GTarget)->Center();
    array_1d<double ,3> Center_C = (GContactor)->Center();
    noalias(normal_T) = Center_C - Center_T;
    noalias(normal_C) = Center_T - Center_C;
    // NOTE(review): only normal_T is normalized; normal_C keeps the
    // center-to-center length, so the contactor-side force is scaled by that
    // distance — confirm this asymmetry is intended.
    noalias(normal_T) = (1.00/norm_2(normal_T)) * normal_T;
    noalias(vel) = vel_T - vel_C;
    double vrn = inner_prod(vel, normal_T);
    double fnd = std::fabs(damp * Ccr * vrn);
    // Distribute the damping force equally to the three nodes of each side.
    // NOTE(review): 'noalias(rhs) =' overwrites RHS rather than accumulating
    // into it — confirm.
    for(unsigned int i = 0; i<(*GTarget).size(); i++)
    {
        array_1d<double, 3>& rhs = (*GTarget)(i)->FastGetSolutionStepValue(RHS);
        noalias(rhs) = 0.33333333333333 * fnd * normal_T;
    }
    for(unsigned int i = 0; i<(*GContactor).size(); i++)
    {
        array_1d<double, 3>& rhs = (*GContactor)(i)->FastGetSolutionStepValue(RHS);
        noalias(rhs) = 0.33333333333333 * fnd * normal_C;
    }
    KRATOS_CATCH("")
}
/* Triangle to Triangle */
//Compute the normal contact force
//
// Computes the 2D penalty contact force between two overlapping triangles
// (Target and Contactor) and accumulates the result into the RHS and NORMAL
// nodal solution-step variables of both geometries.
//
// Method: each triangle's nodes are expressed in the area coordinates of the
// other triangle (array d). For every node/edge pair the overlap interval
// [smin, smax] is found, the penetration profile p(s) is integrated along it,
// and the resulting normal force is split between the two edge nodes
// (fna / fnb). The penalty stiffness is mpenalty_factor times the smaller of
// the two elements' YOUNG_MODULUS, so the softer material governs contact.
void ComputeContactForce2D(const PointerType& Target, const PointerType& Contactor)
{
KRATOS_TRY
// Named real constants used by the integration formulas below.
const double R0 = 0.00;
const double R1 = 1.00;
const double R2 = 2.00;
const double RP5 = 0.50;
const double RP15 = 1.50;
// unsigned int icontact = 0;
unsigned int np = 0;  // number of sampling points on the overlap interval
double a0,a1,a2,b0,b1,b2,c0,c1,c2,n0,n1,n2,fn,fna,fnb;
double pen,tmp,dmin2,smin,smax;
double small = EPSILON;    // positive tolerance
double nsmall= -EPSILON;   // negative tolerance
double big = BEPSILON;     // "infinity" used to initialize dmin2
array_1d<double,10> p;     // penetration values at the sampling points
array_1d<double,10> s;     // parametric coordinates of the sampling points
array_1d<double,3> fx;     // accumulated nodal force, x component
array_1d<double,3> fy;     // accumulated nodal force, y component
array_1d<double,2> vol;    // twice the signed area of each triangle
array_1d<array_1d<double,3>,2> rx;  // nodal x coordinates, [element][node]
array_1d<array_1d<double,3>,2> ry;  // nodal y coordinates, [element][node]
array_1d<array_1d<double,3>,2> nx;  // edge normal x (outward, non-unit)
array_1d<array_1d<double,3>,2> ny;  // edge normal y (outward, non-unit)
array_1d<array_1d<array_1d<double,3>,3>,2> d;  // d[i][k][l]: area coordinate of node k of element i w.r.t. edge l of the other element
double vol2 = 0.00;
// Penalty stiffness: the softer of the two materials governs the contact.
double pen_vec_tar = mpenalty_factor * (Target->GetProperties()[YOUNG_MODULUS]);
double pen_vec_con = mpenalty_factor * (Contactor->GetProperties()[YOUNG_MODULUS]);
pen = std::min(pen_vec_tar, pen_vec_con); ///penalty term
//Element::GeometryType& Tgeom = Target->GetGeometry();
//Element::GeometryType& Cgeom = Contactor->GetGeometry();
// Geom[0] is the contactor geometry, Geom[1] the target geometry.
std::vector<Element::GeometryType::Pointer> Geom(2);
Geom[0] = Contactor->pGetGeometry();
Geom[1] = Target->pGetGeometry();
// Gather the nodal coordinates of both triangles.
for(unsigned int i=0; i<2; i++)
{
for(unsigned int j=0; j<3; j++)
{
rx[i][j] = ((*Geom[i])(j))->X();
ry[i][j] = ((*Geom[i])(j))->Y();
}
}
// Signed double areas and edge normals of both triangles.
for(unsigned int i=0; i<2; i++)
{
vol[i]=(rx[i][1]-rx[i][0])*(ry[i][2]-ry[i][0])- (ry[i][1]-ry[i][0])*(rx[i][2]-rx[i][0]);
// outward, non-unit normals of the element edges
unsigned int k = 0;
for(unsigned int j=0; j<3; j++)
{
k= j+1;
if(k>2) k=0;
nx[i][j]=ry[i][k]-ry[i][j];
ny[i][j]=rx[i][j]-rx[i][k];
}
}
//computing the transformation of the nodal coordinates to the local (area) coordinates
for(unsigned int i=0; i<2; i++)
{
unsigned int j = i+1;
if(j>1) j=0;
for(unsigned int k=0; k<3; k++)
{
for(unsigned int l=0; l<3; l++)
{
d[i][k][l]=((rx[j][l]-rx[i][k])*nx[j][l]+ (ry[j][l]-ry[i][k])*ny[j][l])/vol[j];
}
}
}
dmin2=big;
/* main loop */
// it: element whose nodes are tested; jt: the other element.
for(unsigned int it=0; it<2; it++)
{
unsigned int jt=it+1;
if(jt>1)jt=0;
noalias(fx) = ZeroVector(3);
noalias(fy) = ZeroVector(3);
vol2 = vol[jt]*vol[jt];
// Squared lengths of the edge normals of jt, normalized by the squared area.
n0 = (nx[jt][0]*nx[jt][0]+ny[jt][0]*ny[jt][0])/(vol2);
n1 = (nx[jt][1]*nx[jt][1]+ny[jt][1]*ny[jt][1])/(vol2);
n2 = (nx[jt][2]*nx[jt][2]+ny[jt][2]*ny[jt][2])/(vol2);
// Loop over the edges (in, jn) of element it.
for(unsigned int in=0; in<3; in++)
{
unsigned int jn=in+1;
if(jn>2)jn=0;
// a*: area coordinates of edge start node; b*: of edge end node;
// c*: area coordinates of the jt nodes w.r.t. edge in.
a0=d[it][in][0];
a1=d[it][in][1];
a2=d[it][in][2];
b0=d[it][jn][0];
b1=d[it][jn][1];
b2=d[it][jn][2];
c0=d[jt][0][in];
c1=d[jt][1][in];
c2=d[jt][2][in];
/* check if contact */
if((((c0>nsmall)&&(c1>nsmall)&&(c2>nsmall))||
((c0<small)&&(c1<small)&&(c2<small)))||
(((a0<small)&&(b0<small))||((a1<small)&&(b1<small))||
((a2<small)&&(b2<small))))
{
// No overlap for this edge: only track the minimum squared distance.
if((a0<=a1)&&(a0<=a2))
{
dmin2=std::min(dmin2,(a0*a0/n0));
}
else if((a1<=a0)&&(a1<=a2))
{
dmin2=std::min(dmin2,(a1*a1/n1));
}
else
{
dmin2=std::min(dmin2,(a2*a2/n2));
}
}
else
{
// icontact=it;
/* domain of contact */
// Clip the edge parameter s to the part of the edge inside the other triangle.
smin=R0;
smax=R1;
if((a0<R0)&&(b0>small))smin=std::max(smin,(a0/(a0-b0)));
if((a1<R0)&&(b1>small))smin=std::max(smin,(a1/(a1-b1)));
if((a2<R0)&&(b2>small))smin=std::max(smin,(a2/(a2-b2)));
if((a0>small)&&(b0<R0))smax=std::min(smax,(a0/(a0-b0)));
if((a1>small)&&(b1<R0))smax=std::min(smax,(a1/(a1-b1)));
if((a2>small)&&(b2<R0))smax=std::min(smax,(a2/(a2-b2)));
if(smax>smin)
{
// First sampling point: penetration at smin (minimum over the three area coords).
s[0]=smin;
p[0]=std::min((a0+smin*(b0-a0)),(a1+smin*(b1-a1)));
p[0]=std::min(p[0],(a2+smin*(b2-a2)));
np=1;
/* intermediate points */
// Parameter values where two area-coordinate profiles intersect inside (smin, smax).
tmp=b0-a0+a1-b1;
if((std::fabs(tmp))>small)
{
tmp=(a1-a0)/tmp;
if((tmp>smin)&&(tmp<smax)&&
((a0+tmp*(b0-a0))<(a2+tmp*(b2-a2))))
{
s[np]=tmp;
p[np]=a0+tmp*(b0-a0);
np=np+1;
}
}
tmp=b0-a0+a2-b2;
if((std::fabs(tmp))>small)
{
tmp=(a2-a0)/tmp;
if((tmp>smin)&&(tmp<smax)&&
((a0+tmp*(b0-a0))<(a1+tmp*(b1-a1))))
{
s[np]=tmp;
p[np]=a0+tmp*(b0-a0);
np=np+1;
}
}
tmp=b1-a1+a2-b2;
if((std::fabs(tmp))>small)
{
tmp=(a2-a1)/tmp;
if((tmp>smin)&&(tmp<smax)&&
((a1+tmp*(b1-a1))<(a0+tmp*(b0-a0))))
{
s[np]=tmp;
p[np]=a1+tmp*(b1-a1);
np=np+1;
}
}
// Last sampling point: penetration at smax.
s[np]=smax;
p[np]=std::min((a0+smax*(b0-a0)),(a1+smax*(b1-a1)));
p[np]=std::min(p[np],(a2+smax*(b2-a2)));
np=np+1;
/* order intermediate points */
// Simple exchange sort on the (at most 4) sampling points by s.
for(unsigned ip=0; ip<(np-1); ip++)
{
for(unsigned int jp=(ip+1); jp<np; jp++)
{
if(s[ip]>s[jp])
{
tmp=s[jp];
s[jp]=s[ip];
s[ip]=tmp;
tmp=p[jp];
p[jp]=p[ip];
p[ip]=tmp;
}
}
}
/* integrate normal force */
// Trapezoidal-style integration of the penetration profile along the edge;
// fn is the total, fnb the moment-weighted part, fna the remainder.
fn=p[0]*(s[1]-s[0])+p[np-1]*(s[np-1]-s[np-2]);
fnb=p[0]*(s[1]-s[0])*(s[1]+R2*s[0])+
p[np-1]*(s[np-1]-s[np-2])*(s[np-2]+R2*s[np-1]);
for(unsigned int ip=1; ip<(np-1); ip++)
{
fn=fn+p[ip]*(s[ip+1]-s[ip-1]);
fnb=fnb+p[ip]*(
(s[ip]-s[ip-1])*(s[ip-1]+R2*s[ip])+
(s[ip+1]-s[ip])*(s[ip+1]+R2*s[ip]));
}
fnb=fnb*pen*RP5;
fn=fn*pen*RP15;
fna=fn-fnb;
/* update total force */
// Distribute the edge force to the two edge nodes along the edge normal.
fx[in]=fx[in]-fna*nx[it][in];
fy[in]=fy[in]-fna*ny[it][in];
fx[jn]=fx[jn]-fnb*nx[it][in];
fy[jn]=fy[jn]-fnb*ny[it][in];
}
}
}
//if(icontact==it) /* update nodal forces */
{
// Assemble the accumulated forces into the nodal RHS and NORMAL variables,
// locking each node while it is written (the caller may run this in parallel).
Element::GeometryType& this_geom_1 = (*Geom[it]);
Element::GeometryType& this_geom_2 = (*Geom[jt]);
for(unsigned int in=0; in<3; in++)
{
array_1d<double,3>& node_rhs_1 = this_geom_1(in)->FastGetSolutionStepValue(RHS);
array_1d<double,3>& normal_1 = this_geom_1(in)->FastGetSolutionStepValue(NORMAL);
this_geom_1[in].SetLock();
node_rhs_1[0] += 0.50 * fx[in];
node_rhs_1[1] += 0.50 * fy[in];
node_rhs_1[2] = 0.00;
normal_1[0] += 0.50 * fx[in];
normal_1[1] += 0.50 * fy[in];
normal_1[2] = 0.00;
this_geom_1[in].UnSetLock();
unsigned int ie=in+1;
if(ie>2)ie=0;
for(unsigned int jn=0; jn<3; jn++)
{
// NOTE(review): node_rhs_2/normal_2 are fetched with index 'in' while the
// lock and the force contributions below use 'jn'; this looks like it was
// meant to be this_geom_2(jn) -- confirm against the reference implementation.
array_1d<double,3>& node_rhs_2 = this_geom_2(in)->FastGetSolutionStepValue(RHS);
array_1d<double,3>& normal_2 = this_geom_2(in)->FastGetSolutionStepValue(NORMAL);
this_geom_2[jn].SetLock();
node_rhs_2[0] -= 0.50 * fx[jn]*d[it][jn][ie];
node_rhs_2[1] -= 0.50 * fy[jn]*d[it][jn][ie];
node_rhs_2[2] = 0.00;
normal_2[0] -= 0.50 * fx[jn]*d[it][jn][ie];
normal_2[1] -= 0.50 * fy[jn]*d[it][jn][ie];
normal_2[2] = 0.00;
this_geom_2[jn].UnSetLock();
}
}
}
}
KRATOS_CATCH("")
}
/* tetrahedra to tetrahedra */
// Computes the 3D penalty contact force between two tetrahedra (Target and
// Contactor) and accumulates it into the nodal RHS solution-step variables.
//
// For every pair of target/contactor faces the code: builds a local e-base on
// the contactor face, intersects the target surface with the contactor face
// plane (S-points), clips that polygon against the contactor triangle
// (B-points), integrates the penetration over the clipped polygon, and
// distributes the resulting force to the face nodes of both elements.
// The penalty stiffness is 50 times the smaller of the two YOUNG_MODULUS values.
//
// NOTE(review): unlike ComputeContactForce2D, this method has no
// KRATOS_TRY/KRATOS_CATCH guard -- confirm whether that is intentional.
void ComputeContactForce3D(const PointerType& Target, const PointerType& Contactor)
{
// Named real constants used by the geometric formulas below.
const double R0 = 0.00;
const double R1 = 1.00;
const double R2 = 2.00;
const double R5 = 5.00;
const double RP1 = 0.10;
const double RP25 = 0.25;
const double RP5 = 0.50;
double tmp,theigh,penetr,peneto,penetu,penetv,penalty;
double force,forco,uforc,vforc,factor,fact0,facti,fact1;
double xorig,yorig,zorig,xe[2],ye[2],ze[2],dct[4];
double dsc[6][3],dcs[3][6],us[6],vs[6],ub[10],vb[10],anb[10],penetb[10];
double xt[4],yt[4],zt[4],ut[4],vt[4],ft[4],xcent,ycent,zcent,xnt,ynt,znt;
double xc[4],yc[4],zc[4],uc[4],vc[4],fc[4],xcenc,ycenc,zcenc,xnc,ync,znc;
double /*zone2,dmin2,*/factor1;
// long /*kprop,icontact,ielem,jelem,icoup,jcoup,*/fnonzero;
long i,j,k,inext,jnext,itars,icons;
long nspoin,ninerc,niners,nbpoin,innerc[3],inners[6];
//long itarth,iconth;
// long iptn[4],ipcn[4];
// long iptn1[4],ipcn1[4],m;
NodePointerType ipt[4], ipc[4];  // node pointers of the current target/contactor faces
// Penalty stiffness: the softer of the two materials governs the contact.
double pen_vec_tar = 50.00 * (Target->GetProperties()[YOUNG_MODULUS]);
double pen_vec_con = 50.00 * (Contactor->GetProperties()[YOUNG_MODULUS]);
Target->GetValue(IS_TARGET) = true;  // flag the target element for later stages
penalty = std::min(pen_vec_tar, pen_vec_con); ///penalty term
Element::GeometryType& Tgeom = Target->GetGeometry();
Element::GeometryType& Cgeom = Contactor->GetGeometry();
//std::vector<Element::GeometryType::Pointer> Geom(2);
//Geom[0] = Target->pGetGeometry();
//Geom[1] = Contactor->pGetGeometry();
/*set centres of contactor and target object */
xcent=R0;
ycent=R0;
zcent=R0;
xcenc=R0;
ycenc=R0;
zcenc=R0;
for(i=0; i<4; i++)
{
xcenc=xcenc+RP25*(Cgeom(i)->X());
ycenc=ycenc+RP25*(Cgeom(i)->Y());
zcenc=zcenc+RP25*(Cgeom(i)->Z());
xcent=xcent+RP25*(Tgeom(i)->X());
ycent=ycent+RP25*(Tgeom(i)->Y());
zcent=zcent+RP25*(Tgeom(i)->Z());
}
/*********************************************************/
/* loop over target surfaces */
/*********************************************************/
// ipt stores the node connectivities of the current target face
for(itars=0; itars<4; itars++)
{
// Select the three nodes of face itars (plus the opposite node in ipt[3]).
ipt[0] = Tgeom(itars); //i2elto[itars][itarth];
// iptn1[0]= itars;
ipt[1] = Tgeom(1); //i2elto[1][itarth];
// iptn1[1]= 1;
ipt[2] = Tgeom(2); //i2elto[2][itarth];
// iptn1[2]= 2;
if(itars>0)
{
ipt[3] = Tgeom(itars-1); //i2elto[itars-1][itarth];
// iptn1[3]= itars-1;
}
else
{
ipt[3] = Tgeom(3); //i2elto[3][itarth];
// iptn1[3]= 3;
}
if((itars==1)||(itars==2))
{
ipt[1] = Tgeom(3); //i2elto[3][itarth];
// iptn1[1]=3;
}
if(itars>1)
{
ipt[2] = Tgeom(0); //i2elto[0][itarth];
// iptn1[2]= 0;
}
/*****************************************************/
/* loop over contactor surfaces */
/*****************************************************/
for(icons=0; icons<4; icons++)
{
// Same face-node selection for the contactor tetrahedron.
ipc[0] = Cgeom(icons); //i2elto[icons][iconth];
// ipcn1[0] = icons;
ipc[1] = Cgeom(1); //i2elto[1][iconth];
// ipcn1[1] = 1;
ipc[2] = Cgeom(2); //i2elto[2][iconth];
// ipcn1[2] = 2;
if(icons>0)
{
ipc[3] = Cgeom(icons-1); //i2elto[icons-1][iconth];
// ipcn1[3] = icons-1;
}
else
{
ipc[3] = Cgeom(3); //i2elto[3][iconth];
// ipcn1[3] = 3;
}
if((icons==1)||(icons==2))
{
ipc[1] = Cgeom(3); //i2elto[3][iconth];
// ipcn1[1]= 3;
}
if(icons>1)
{
ipc[2] = Cgeom(0); //i2elto[0][iconth];
// ipcn1[2] = 0;
}
// for(m=0; m<4; m++)
// {
// iptn[iptn1[m]]=m;
// ipcn[ipcn1[m]]=m;
// }
/* set nodal coordinates */
for(i=0; i<3; i++)
{
xt[i] = ipt[i]->X();
yt[i] = ipt[i]->Y();
zt[i] = ipt[i]->Z();
xc[i] = ipc[i]->X();
yc[i] = ipc[i]->Y();
zc[i] = ipc[i]->Z();
}
// Fourth point of each face quadruple is the element centroid.
xt[3]=xcent;
yt[3]=ycent;
zt[3]=zcent;
xc[3]=xcenc;
yc[3]=ycenc;
zc[3]=zcenc;
// Shift everything so the first contactor node is the origin.
xorig=xc[0];
yorig=yc[0];
zorig=zc[0];
for(i=0; i<4; i++)
{
xt[i]=xt[i]-xorig;
yt[i]=yt[i]-yorig;
zt[i]=zt[i]-zorig;
xc[i]=xc[i]-xorig;
yc[i]=yc[i]-yorig;
zc[i]=zc[i]-zorig;
}
/* contactor normal, e-base and target points in e-base */
V3DCro(xnc,ync,znc,xc[1],yc[1],zc[1],xc[2],yc[2],zc[2]);
// NOTE(review): the first V3DNor call's output (xe[0]) is immediately
// overwritten below; presumably V3DNor normalizes (xnc,ync,znc) in place
// and xe[0] is just a scratch length output here -- confirm the macro.
V3DNor(xe[0],xnc,ync,znc);
xe[0]=xc[1];
ye[0]=yc[1];
ze[0]=zc[1];
V3DNor(xe[1],xe[0],ye[0],ze[0]);
V3DCro(xe[1],ye[1],ze[1],xnc,ync,znc,xe[0],ye[0],ze[0]);
// dct: signed distance of each target point from the contactor face plane;
// (ut, vt): in-plane coordinates of the target points in the e-base.
for(i=0; i<4; i++)
{
V3DDot(dct[i],xnc,ync,znc,xt[i],yt[i],zt[i]);
V3DDot(ut[i],xt[i],yt[i],zt[i],xe[0],ye[0],ze[0]);
V3DDot(vt[i],xt[i],yt[i],zt[i],xe[1],ye[1],ze[1]);
}
/* u,v coordinates of S-points and C-points */
// S-points: intersections of target-surface edges with the contactor face plane.
nspoin=0;
for(i=0; i<3; i++)
{
for(j=0; j<2; j++)
{
inext=i+1;
if(inext>2)inext=0;
if(j==0)inext=3;
if(((dct[i]>EPSILON)&&(dct[inext]<NEPSILON))|| ((dct[i]<NEPSILON)&&(dct[inext]>EPSILON)))
//Modified by JXiang
{
factor=std::fabs(dct[i]-dct[inext]);
if(factor>EPSILON)
{
factor=std::fabs(dct[i]/factor);
us[nspoin]=factor*ut[inext]+(R1-factor)*ut[i];
vs[nspoin]=factor*vt[inext]+(R1-factor)*vt[i];
inners[nspoin]=0;
nspoin=nspoin+1;
}
}
}
}
// Only 3- or 4-sided intersection polygons are handled.
if((nspoin<3)||(nspoin>4))continue;
/* check odering of S-points */
// Ensure counter-clockwise ordering; reverse the point list if needed.
if(((us[1]-us[0])*(vs[2]-vs[0])-(vs[1]-vs[0])*(us[2]-us[0]))<R0)
{
i=0;
j=nspoin-1;
while(i<j)
{
k=inners[i];
inners[i]=inners[j];
inners[j]=k;
tmp=us[i];
us[i]=us[j];
us[j]=tmp;
tmp=vs[i];
vs[i]=vs[j];
vs[j]=tmp;
i++;
j--;
}
}
// C-points: contactor face nodes projected into the e-base.
for(i=0; i<3; i++)
{
V3DDot(uc[i],xc[i],yc[i],zc[i],xe[0],ye[0],ze[0]);
V3DDot(vc[i],xc[i],yc[i],zc[i],xe[1],ye[1],ze[1]);
innerc[i]=0;
}
/* distances of C-points from S edges */
// Count how many edges each point lies inside of, to classify the overlap.
niners=0;
ninerc=0;
for(i=0; i<nspoin; i++)
{
inext=i+1;
if(inext>=nspoin)inext=0;
for(j=0; j<3; j++)
{
jnext=j+1;
if(jnext>2)jnext=0;
dcs[j][i]=(uc[jnext]-uc[j])*(vs[i]-vc[j])-(vc[jnext]-vc[j])*(us[i]-uc[j]);
dsc[i][j]=(us[inext]-us[i])*(vc[j]-vs[i])-(vs[inext]-vs[i])*(uc[j]-us[i]);
if(dsc[i][j]>=R0)
{
innerc[j]=innerc[j]+1;
if(innerc[j]==nspoin) ninerc=ninerc+1;
}
if(dcs[j][i]>=R0)
{
inners[i]=inners[i]+1;
if(inners[i]==3) niners = niners+1;
}
}
}
/* B-points */
// B-points: vertices of the intersection of the C-triangle and the S-polygon.
if(ninerc==3) /* triangle inside poligon */
{
nbpoin=3;
for(i=0; i<nbpoin; i++)
{
ub[i]=uc[i];
vb[i]=vc[i];
}
}
else if(niners==nspoin) /* poligon inside triangle */
{
nbpoin=nspoin;
for(i=0; i<nbpoin; i++)
{
ub[i]=us[i];
vb[i]=vs[i];
}
}
else /* intersection points poligon triangle */
{
nbpoin=0;
for(i=0; i<nspoin; i++)
{
if(inners[i]==3)
{
ub[nbpoin]=us[i];
vb[nbpoin]=vs[i];
nbpoin++;
}
}
for(i=0; i<3; i++) /* grab inner C-points */
{
if(innerc[i]==nspoin)
{
ub[nbpoin]=uc[i];
vb[nbpoin]=vc[i];
nbpoin++;
}
}
for(i=0; i<nspoin; i++) /* intersection points */
{
inext=i+1;
if(inext>=nspoin)inext=0;
for(j=0; j<3; j++)
{
jnext=j+1;
if(jnext>2)jnext=0;
if((((dsc[i][j]>EPSILON)&&(dsc[i][jnext]<NEPSILON))||
((dsc[i][j]<NEPSILON)&&(dsc[i][jnext]>EPSILON)))&&
(((dcs[j][i]>EPSILON)&&(dcs[j][inext]<NEPSILON))||
((dcs[j][i]<NEPSILON)&&(dcs[j][inext]>EPSILON))))
//modified by JXiang
{
factor=std::fabs(dsc[i][j]-dsc[i][jnext]);
if(factor<EPSILON)
{
factor=RP5;
}
else
{
factor=std::fabs(dsc[i][j]/factor);
}
ub[nbpoin]=(R1-factor)*uc[j]+factor*uc[jnext];
vb[nbpoin]=(R1-factor)*vc[j]+factor*vc[jnext];
nbpoin++;
}
}
}
// Move the point with the smallest v to slot 0 (pivot for the angular sort).
for(i=1; i<nbpoin; i++)
{
if(vb[i]<vb[0])
{
tmp=vb[i];
vb[i]=vb[0];
vb[0]=tmp;
tmp=ub[i];
ub[i]=ub[0];
ub[0]=tmp;
}
}
// anb[i]: slope of point i relative to the pivot, nudged away from zero
// to avoid division by (near-)zero.
for(i=1; i<nbpoin; i++)
{
tmp=ub[i]-ub[0];
if((tmp<R0)&&(tmp>(-EPSILON)))
{
tmp=tmp-EPSILON;
}
else if((tmp>=R0)&&(tmp<EPSILON))
{
tmp=tmp+EPSILON;
}
anb[i]=(vb[i]-vb[0]+EPSILON)/tmp;
}
for(i=1; i<nbpoin; i++) /* sort B-points */
{
for(j=i+1; j<nbpoin; j++)
{
if(((anb[i]>=R0)&&(anb[j]>=R0)&&(anb[j]<anb[i]))||
((anb[i]<R0)&&((anb[j]>=R0)||(anb[j]<anb[i]))))
{
tmp=vb[i];
vb[i]=vb[j];
vb[j]=tmp;
tmp=ub[i];
ub[i]=ub[j];
ub[j]=tmp;
tmp=anb[i];
anb[i]=anb[j];
anb[j]=tmp;
}
}
}
}
if(nbpoin<3)continue;
/* Target-plain normal and penetration at B-points */
V3DCro(xnt,ynt,znt,xt[1]-xt[0],yt[1]-yt[0],zt[1]-zt[0],xt[2]-xt[0],yt[2]-yt[0],zt[2]-zt[0]);
V3DDot(theigh,xt[3]-xt[0],yt[3]-yt[0],zt[3]-zt[0],xnt,ynt,znt);
/* penetration at origin of the e-base and dp/du dp/dv; */
V3DDot(peneto,xc[0]-xt[0],yc[0]-yt[0],zc[0]-zt[0],xnt,ynt,znt);
V3DDot(penetu,xe[0],ye[0],ze[0],xnt,ynt,znt);
V3DDot(penetv,xe[1],ye[1],ze[1],xnt,ynt,znt);
peneto=peneto/theigh;
penetu=penetu/theigh;
penetv=penetv/theigh;
// Penetration is linear in (u, v) over the B-polygon.
for(i=0; i<nbpoin; i++)
{
penetb[i]=peneto+ub[i]*penetu+vb[i]*penetv;
}
/* force and center of force */
// Fan-triangulate the B-polygon from vertex 0 and integrate the penetration
// over each sub-triangle, keeping a running force resultant and its centroid.
forco=R0;
uforc=R0;
vforc=R0;
for(i=1; i<(nbpoin-1); i++)
{
penetr=penetb[0]+penetb[i]+penetb[i+1];
if(penetr>EPSILON)
{
force=((ub[i]-ub[0])*(vb[i+1]-vb[0])-(vb[i]-vb[0])*(ub[i+1]-ub[0]))*penetr*penalty;
fact0=(RP5*penetb[0]+RP25*(penetb[i]+penetb[i+1]))/penetr;
facti=(RP5*penetb[i]+RP25*(penetb[0]+
penetb[i+1]))/penetr;
fact1=R1-fact0-facti;
if(std::fabs(force+forco)>EPSILON)
{
uforc=(forco*uforc+force*(fact0*ub[0]+
facti*ub[i]+fact1*ub[i+1]))/(forco+force);
vforc=(forco*vforc+force*(fact0*vb[0]+
facti*vb[i]+fact1*vb[i+1]))/(forco+force);
forco=forco+force;
}
}
}
/*resultant at C-points */
// Distribute the resultant to the contactor face nodes by barycentric weights.
for(i=0; i<4; i++)
{
fc[i]=R0;
ft[i]=R0;
}
tmp=((uc[1]-uc[0])*(vc[2]-vc[0])-
(vc[1]-vc[0])*(uc[2]-uc[0]));
for(i=0; i<3; i++)
{
j=i+1;
if(j>2)j=0;
k=j+1;
if(k>2)k=0;
fc[k]=forco*(((uc[j]-uc[i])*(vforc-vc[i])-
(vc[j]-vc[i])*(uforc-uc[i]))/tmp);
}
/*resultant at T-points*/
// If the target face is nearly degenerate in the e-base, replace its most
// collapsed vertex with the centroid point (index 3) before distributing.
tmp=((ut[1]-ut[0])*(vt[2]-vt[0])-(vt[1]-vt[0])*(ut[2]-ut[0]));
inext=-1;
if(std::fabs(tmp)<RP1*theigh)
{
inext=0;
tmp=std::fabs(ut[1]-ut[0])+std::fabs(vt[1]-vt[0]);
for(i=0; i<3; i++)
{
j=i+1;
if(j>2)j=0;
if(tmp>(std::fabs(ut[j]-ut[i])+std::fabs(vt[j]-vt[i])))
{
tmp=std::fabs(ut[j]-ut[i])+std::fabs(vt[j]-vt[i]);
inext=i;
}
}
j=inext+1;
if(j>2)j=0;
if(std::fabs(zt[j])>std::fabs(zt[inext]))inext=j;
j=inext+1;
if(j>2)j=0;
k=j+1;
if(k>2)k=0;
tmp=(ut[k]-ut[j])*(vt[3]-vt[j])-
(vt[k]-vt[j])*(ut[3]-ut[j]);
}
for(jnext=0; jnext<3; jnext++)
{
i=jnext;
j=i+1;
if(j>2)j=0;
k=j+1;
if(k>2)k=0;
if(i==inext)i=3;
if(j==inext)j=3;
if(k==inext)k=3;
ft[k]=forco*(((ut[j]-ut[i])*(vforc-vt[i])-
(vt[j]-vt[i])*(uforc-ut[i]))/tmp);
}
// Redistribute the centroid contribution equally to the three face nodes.
ft[3]=RP25*ft[3];
for(i=0; i<3; i++)
{
ft[i]=ft[i]+ft[3];
}
/* add forces into global vector */
factor1=R2/R5;
// fnonzero=1;
// Equal and opposite forces along the contactor face normal.
for(i=0; i<4; i++)
{
array_1d<double,3>& node_rhs_1 = (ipc[i])->FastGetSolutionStepValue(RHS);
array_1d<double,3>& node_rhs_2 = (ipt[i])->FastGetSolutionStepValue(RHS);
node_rhs_1[0] += fc[i]*xnc*factor1;
node_rhs_1[1] += fc[i]*ync*factor1;
node_rhs_1[2] += fc[i]*znc*factor1;
node_rhs_2[0] -= ft[i]*xnc*factor1;
node_rhs_2[1] -= ft[i]*ync*factor1;
node_rhs_2[2] -= ft[i]*znc*factor1;
}
}
}
}
//************************************************************************************
//************************************************************************************
// Funcion que se llama antes de la acualizacion de los desplazamientos
void LocalSearch()
{
KRATOS_TRY
IteratorType it_begin = mBoundaryElements.begin();
IteratorType it_end = mBoundaryElements.end();
BinsObjectDynamic<Configure> rBinsObjectDynamic(it_begin, it_end );
if(mrdimension==2)
{
SearchNearNode2D(rBinsObjectDynamic, it_begin, it_end);
LocalSearch2D(rBinsObjectDynamic, it_begin, it_end);
}
else
LocalSearch3D(rBinsObjectDynamic, it_begin, it_end);
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
bool SearchContactsPairs()
{
KRATOS_TRY
std::cout<< std::endl;
std::cout<<" COMPUTING CONTACT CONDITIONS TO MODEL PART " << std::endl;
IteratorType it_begin = mBoundaryElements.begin();
IteratorType it_end = mBoundaryElements.end();
BinsObjectDynamic<Configure> rBinsObjectDynamic(it_begin, it_end );
rBinsObjectDynamic.SearchContact(mPairContacts);
if(mrdimension==2)
{
LocalSearch2D(rBinsObjectDynamic, it_begin, it_end);
FiltratePairContacts2D(mPairContacts);
}
else
{
//LocalSearch3D(rBinsObjectDynamic, it_begin, it_end);
//std::cout<< " NUMBER OF CONTACT PAIRS = " <<mPairContacts.size()<<std::endl;
//FiltratePairContacts3D(mPairContacts);
std::cout<< " NUMBER OF CONTACT PAIRS = " <<mPairContacts.size()<<std::endl;
}
if(mPairContacts.size()!=0)
{
std::cout<< " NUMBER OF CONTACT PAIRS = " <<mPairContacts.size()<<std::endl;
//KRATOS_THROW_ERROR(std::logic_error, "GetValue", "");
return true;
}
std::cout<< " NO CONTACTS PAIRS "<<std::endl;
return false;
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
void ResetValues()
{
KRATOS_TRY
NodesArrayType& pNodes = mr_model_part.Nodes();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
i->GetValue(IS_CONTACT_SLAVE) = 0;
i->GetValue(IS_CONTACT_MASTER) = 0;
i->GetValue(NODAL_VALUES) = 0;
i->GetValue(DISTANCE) = DBL_MAX;
}
}
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
// Resets all contact-related state between search passes:
//  - clears the per-node contact flags/values (in parallel),
//  - when the boundary contour must be recomputed, erases the master
//    conditions created past initial_conditions_size, rebuilds the
//    element/node/condition neighbour structures, restores the
//    condition->element back-links, and clears the cached pair/boundary
//    containers.
// initial_conditions_size: number of conditions that existed before the
// contact conditions were appended; everything after it is removed.
void Clear(const unsigned int& initial_conditions_size)
{
KRATOS_TRY
NodesArrayType& pNodes = mr_model_part.Nodes();
ElementsArrayType& pElements = mr_model_part.Elements();
ConditionsArrayType& pConditions = mr_model_part.Conditions();
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
vector<unsigned int> node_partition;
CreatePartition(number_of_threads, pNodes.size(), node_partition);
// Reset the per-node contact bookkeeping, one partition per thread.
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
{
i->GetValue(IS_CONTACT_SLAVE) = 0;
i->GetValue(IS_CONTACT_MASTER) = 0;
i->GetValue(NODAL_VALUES) = 0;
i->GetValue(DISTANCE) = DBL_MAX;
i->GetValue(NEAR_NODE) = *(i.base());  // each node starts as its own nearest node
}
}
// Only rebuild when new boundary conditions may have been produced.
if(mcompute_boundary_contour==true)
{
/// Erase the master conditions appended after the original set
if(pConditions.size()>initial_conditions_size)
{
ModelPart::ConditionIterator end_previos = pConditions.begin() + initial_conditions_size;
ModelPart::ConditionIterator end_actual = pConditions.end();
pConditions.erase(end_previos, end_actual);
}
// Rebuild the neighbour structures from scratch.
FindElementalNeighboursProcess ElementosVecinos(mr_model_part, mrdimension, 10);
FindNodalNeighboursProcess NodosVecinos(mr_model_part, mrdimension, 10);
FindConditionsNeighboursProcess CondicionesVecinas(mr_model_part, mrdimension, 10);
ElementosVecinos.ClearNeighbours();
NodosVecinos.ClearNeighbours();
CondicionesVecinas.ClearNeighbours();
ElementosVecinos.Execute();
NodosVecinos.Execute();
CondicionesVecinas.Execute();
// Drop any stale condition->element links left by the neighbour processes.
vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, pConditions.size(), condition_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
ConditionsArrayType::iterator it_begin=pConditions.ptr_begin()+condition_partition[k];
ConditionsArrayType::iterator it_end=pConditions.ptr_begin()+condition_partition[k+1];
for (ConditionsArrayType::iterator it= it_begin; it!=it_end; ++it)
{
WeakPointerVector<Element>& rC = it->GetValue(NEIGHBOUR_ELEMENTS);
rC.erase(rC.begin(),rC.end() );
}
}
// Re-register each element with its neighbour conditions.
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k];
ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1];
for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it)
{
WeakPointerVector<Condition> & neighb_conds = it->GetValue(NEIGHBOUR_CONDITIONS);
for (WeakPointerVector< Condition >::iterator neighb_cond = neighb_conds.begin(); neighb_cond != neighb_conds.end(); neighb_cond++)
(neighb_cond->GetValue(NEIGHBOUR_ELEMENTS)).push_back( *(it.base()));
}
}
// Invalidate the cached search results; they are rebuilt on the next pass.
mPairContacts.clear();
mBoundaryElements.clear();
mMasterConditionsArray.clear();
}
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
void CreateLinkingConditionsBasedOnLocalSearch(const unsigned int& initial_conditions_size)
{
KRATOS_TRY
if(mrdimension==2)
CreateLinkingConditionsBasedOnLocalSearch2D(initial_conditions_size);
else
CreateLinkingConditionsBasedOnLocalSearch3D(initial_conditions_size);
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
// 2D version: searches neighbouring boundary elements with dynamic bins and,
// for each filtered contact pair, either creates node-to-segment linking
// conditions (when slave nodes lie inside the master element) or applies a
// direct penalty force via ComputeContactForce2D (when no node is inside).
// New conditions are collected per thread and appended to the model part at
// the end. initial_conditions_size is only used for reporting.
void CreateLinkingConditionsBasedOnLocalSearch2D(const unsigned int& initial_conditions_size)
{
KRATOS_TRY
ConditionsArrayType& rConditions = mr_model_part.Conditions();
IntersectTriangleCases<Configure> IntersectTriangles(mr_model_part);
array_1d<NodePointerType, 2> Ids;
std::vector<NodePointerType> InsideNodes;
std::vector<array_1d<NodePointerType, 2 > > Ids_2;
std::vector<Near_Node> Is_Near;
std::vector<ConditionsArrayType> LinkingConditions;  // one container per thread
unsigned int Id = rConditions.size() + 1;
unsigned int properties_index = mr_model_part.NumberOfProperties();
PropertiesType::Pointer tempProperties = PropertiesType::Pointer(new PropertiesType(properties_index+1));
//mr_model_part.AddProperties(tempProperties);
int Case = 0;
unsigned int master = 0;
unsigned int slave = 1;
//bool is_repited = false;
//bool corner = false;
bool Change = true;
// Near_Node Near = no_near;
Exist_Node Exist = no_nodes;
NodePointerType Id_Node_Case_5;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
//std::vector<ResultContainerType> Result(number_of_threads);
ResultContainerType Result;
/// create the bins of objects
BinsObjectDynamic<Configure> rBinsObjectDynamic(mBoundaryElements.begin(), mBoundaryElements.end());
///bins of points
//BinsDynamic<2, NodeType, NodesContainerType> BinsPoint(mBoundaryNodes.begin(), mBoundaryNodes.end());
/// local search of the segments nearest to each node
//SearchNearNode2D();
SearchNearNode2D(rBinsObjectDynamic, mBoundaryElements.begin(), mBoundaryElements.end());
IdentifyMasterSegment2D();
//LocalSearch2D(rBinsObjectDynamic, mBoundaryElements.begin(), mBoundaryElements.end());
LinkingConditions.resize(number_of_threads);
vector<unsigned int> partition;
CreatePartition(number_of_threads, mBoundaryElements.size(), partition);
ContactPairType it_pair;
std::cout<<" PARTITION COMPUTING CONTACT CONDITIONS = " << number_of_threads << std::endl;
std::cout<<" NUMBER OF INITIAL CONDITIONS = " << initial_conditions_size << std::endl;
std::cout<<" NUMBER OF MASTER SURFACES CONDITIONS = " << rConditions.size()-initial_conditions_size << std::endl;
// #pragma omp parallel for firstprivate (Case, Id_Node_Case_5, master, slave, /*is_repited, corner,*/ Change, Near, Exist) private (Result, Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near)
// NOTE(review): 'Id' is private, so each thread's copy is uninitialized inside
// the parallel region and the ids of created conditions may collide; the 3D
// variant reassigns ids afterwards but this one does not -- confirm intent.
#pragma omp parallel for firstprivate (Case, Id_Node_Case_5, master, slave, Change, Exist) private (Result, Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near)
for(int k=0; k<number_of_threads; k++)
{
IteratorType it_begin = mBoundaryElements.begin() + partition[k];
IteratorType it_end = mBoundaryElements.begin() + partition[k+1];
for(IteratorType it =it_begin; it!=it_end; it++)
{
// Find all boundary elements near the current one.
Result.clear();
Result.reserve(100);
(*it)->GetValue(IS_TARGET)=true;
rBinsObjectDynamic.SearchObjects(*it, Result);
if(Result.size()!=0)
{
for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); ++rthis)
{
if(FiltratePairContacts(*it, *rthis)==true)
{
// Reset the per-pair state; the neighbour is the master candidate.
Exist = no_nodes;
Case = 0;
master = 0;
slave = 1;
(it_pair)[master] = (*rthis);
(it_pair)[slave] = (*it);
Ids_2.clear();
InsideNodes.clear();
NodeInside((it_pair)[0], (it_pair)[1], InsideNodes);
if(InsideNodes.size()==0)
{
// Try the roles swapped; if nodes are then inside, skip the pair
// here (it will be handled from the other element's perspective).
InsideNodes.clear();
ContactPairType it_pair_2;
(it_pair_2)[master] = (*it);
(it_pair_2)[slave] = (*rthis);
NodeInside((it_pair_2)[0], (it_pair_2)[1], InsideNodes);
if(InsideNodes.size()==0)
{
Exist = no_nodes;
}
else
{
Exist = yes_nodes;
continue;
}
}
else
Exist = yes_nodes;
switch(Exist)
{
case(yes_nodes):
{
// Nodes inside: create node-to-segment linking conditions.
Case = IntersectTriangles.LocateCaseItersection(Id_Node_Case_5, Change, InsideNodes, (it_pair)[master], (it_pair)[slave]);
switch(Case)
{
/*
case 1: /// un solo nodo dentro
{
Near = CheckNearNodes(master, slave, InsideNodes[0], (it_pair)[master], Ids);
if(CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k])){
Id++;
}
break;
}
*/
case 1 :
case 2: /// two nodes inside
{
for(unsigned int in = 0; in<InsideNodes.size(); in++)
{
{
// Near = CheckNearNodes(master, slave, InsideNodes[in], (it_pair)[master], Ids);
if(CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k]))
{
Id++;
}
}
}
break;
}
case 3:
{
break;
}
case 5:
{
break;
}
}
break;
}
case(no_nodes):
{
// No nodes inside: apply the edge-to-edge penalty force directly.
ComputeContactForce2D(((it_pair)[slave]), ((it_pair)[master]));
/*
///Penalty
unsigned int size_master = ((it_pair)[master])->GetValue(NEIGHBOUR_CONDITIONS).size();
unsigned int size_slave = ((it_pair)[slave])->GetValue(NEIGHBOUR_CONDITIONS).size();
if(size_master==1 && size_slave==1)
{
//std::cout<< " MASTER OBJECT = " << (it_pair)[master]->Id() <<" SLAVE OBJECT = " << (it_pair)[slave]->Id() << std::endl;
CheckNearNodes(master, slave, (it_pair)[slave], (it_pair)[master], Ids_2, Is_Near);
//KRATOS_WATCH(Ids_2.size())
if(CreateLinkingConditions(Id, master, slave, Ids_2[0], it_pair, tempProperties, Exist, LinkingConditions[k])){
Id++;
}
}
else if(size_master>1 || size_slave>1)
{
ComputeContactForce2D(((it_pair)[slave]), ((it_pair)[master]));
}
*/
break;
}
}
}
}
}
}
}
unsigned int size = 0;
//adding linking to model_part
for(int k=0; k<number_of_threads; k++)
{
size+=LinkingConditions[k].size();
for(ConditionsArrayType::ptr_iterator it=LinkingConditions[k].ptr_begin(); it!= LinkingConditions[k].ptr_end(); ++it )
mr_model_part.Conditions().push_back(*it);
}
std::cout<<" NUMBER OF LINKING CONTACT CONDITIONS = " << size << std::endl;
std::cout<<" TOTAL NUMBER CONDITIONS = " << rConditions.size() << std::endl;
LinkingConditions.clear();
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
// 3D version: searches neighbouring boundary elements with dynamic bins and
// creates node-to-surface linking conditions for every slave node found
// inside a master element. Conditions are collected per thread, re-numbered
// sequentially afterwards (so the uninitialized private 'Id' copies in the
// parallel region are harmless), and appended to the model part.
// initial_conditions_size is only used for reporting.
void CreateLinkingConditionsBasedOnLocalSearch3D(const unsigned int& initial_conditions_size)
{
KRATOS_TRY
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
ConditionsArrayType& rConditions = mr_model_part.Conditions();
array_1d<NodePointerType, 2 > Ids;
std::vector<NodePointerType> InsideNodes;
std::vector<array_1d<unsigned int, 2 > > Ids_2;
std::vector<Near_Node> Is_Near;
std::vector<ConditionsArrayType> LinkingConditions(number_of_threads);  // one container per thread
unsigned int Id = rConditions.size() + 1;
unsigned int properties_index = mr_model_part.NumberOfProperties();
PropertiesType::Pointer tempProperties = PropertiesType::Pointer(new PropertiesType(properties_index+1) );
//mr_model_part.AddProperties(tempProperties);
int Case = 0;
// unsigned int Id_Node_Case_5 = 0;
unsigned int master = 0;
unsigned int slave = 1;
// bool is_repited = false;
// bool corner = false;
// bool Change = true;
// Near_Node Near = no_near;
Exist_Node Exist = no_nodes;
ResultContainerType Result;
#ifdef _OPENMP
double start_prod = omp_get_wtime();
#endif
BinsObjectDynamic<Configure> rBinsObjectDynamic(mBoundaryElements.begin(), mBoundaryElements.end());
#ifdef _OPENMP
double stop_prod = omp_get_wtime();
std::cout << " Time creating bins = " << stop_prod - start_prod << " seconds" <<std::endl;
#endif
LocalSearch3D(rBinsObjectDynamic, mBoundaryElements.begin(), mBoundaryElements.end());
vector<unsigned int> partition;
CreatePartition(number_of_threads, mBoundaryElements.size(), partition);
ContactPairType it_pair;
std::cout<<" Number of threads used for contact = " << number_of_threads << std::endl;
std::cout<<" Number of initial conditions = " << initial_conditions_size << std::endl;
std::cout<<" Number of master surface conditions = " << rConditions.size()-initial_conditions_size << std::endl;
#ifdef _OPENMP
double start = omp_get_wtime();
#endif
#pragma omp parallel for shared(LinkingConditions) private(Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near, Case, master, slave, Exist, Result)
// #pragma omp parallel for shared(LinkingConditions) private(Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near, Case, Id_Node_Case_5, master, slave, is_repited, corner, Change, Near, Exist, Result)
for(int k=0; k<number_of_threads; k++)
{
IteratorType it_begin = mBoundaryElements.begin() + partition[k];
IteratorType it_end = mBoundaryElements.begin() + partition[k+1];
for(IteratorType it =it_begin; it!=it_end; it++)
{
// Find all boundary elements near the current one.
Result.clear();
Result.reserve(100);
rBinsObjectDynamic.SearchObjects(*it, Result);
if(Result.size()!=0)
{
for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
{
if(FiltratePairContacts(*it, *rthis)==true)
{
// Reset the per-pair state; the neighbour is the master candidate.
Exist = no_nodes;
Case = 0;
master = 0;
slave = 1;
(it_pair)[master] = (*rthis);
(it_pair)[slave] = (*it);
Ids_2.clear();
InsideNodes.clear();
NodeInside((it_pair)[0], (it_pair)[1], InsideNodes);
if(InsideNodes.size()==0)
{
// Try the roles swapped; if nodes are then inside, skip the pair
// here (it will be handled from the other element's perspective).
InsideNodes.clear();
ContactPairType it_pair_2;
(it_pair_2)[master] = (*it);
(it_pair_2)[slave] = (*rthis);
NodeInside((it_pair_2)[0], (it_pair_2)[1], InsideNodes);
if(InsideNodes.size()==0)
{
Exist = no_nodes;
}
else
{
Exist = yes_nodes;
continue;
}
}
else
Exist = yes_nodes;
switch(Exist)
{
case(yes_nodes):
{
//std::cout<< " Yes Nodes" << std::endl;
//std::cout<< " MASTER OBJECT = " << (it_pair)[master]->Id() <<" SLAVE OBJECT = " << (it_pair)[slave]->Id() << std::endl;
// One linking condition per inside node not yet flagged as slave.
Case = InsideNodes.size();
switch(Case)
{
/*
case 1: // un solo nodo dentro
{
(InsideNodes[0])->GetValue(IS_CONTACT_SLAVE) = 1;
Near = CheckNearNodes(master, slave, InsideNodes[0], (it_pair)[master], Ids);
CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k]);
Id++;
break;
}
*/
case 1:
case 2:
case 3:
{
for(unsigned int in = 0; in<InsideNodes.size(); in++)
{
if(InsideNodes[in]->GetValue(IS_CONTACT_SLAVE)==0)
{
InsideNodes[in]->GetValue(IS_CONTACT_SLAVE) = 1;
// Near = CheckNearNodes(master, slave, InsideNodes[in], (it_pair)[master], Ids);
CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k] );
Id++;
}
}
break;
}
case 5:
{
break;
}
}
break;
}
case(no_nodes):
{
// No nodes inside: nothing to do in 3D (no penalty fallback here).
break;
}
}
// Reset the per-pair state before the next candidate.
Exist = no_nodes;
// Near = no_near;
// corner = false;
// Change = true;
// is_repited = false;
Case = 0;
// Id_Node_Case_5 = 0;
master = 0;
slave = 1;
Ids_2.clear();
Is_Near.clear();
InsideNodes.clear();
}
}
}
}
}
#ifdef _OPENMP
double stop = omp_get_wtime();
std::cout << " Time Creating Linking Conditions = " << stop - start << " seconds" << std::endl;
#endif
// Re-number all created conditions sequentially after the existing ones.
int rId = rConditions.size() + 1;
for(int k=0; k<number_of_threads; k++)
{
for(ConditionsArrayType::ptr_iterator it=LinkingConditions[k].ptr_begin(); it!= LinkingConditions[k].ptr_end(); ++it )
{
(*it)->SetId(rId);
rId++;
}
}
// Append the per-thread condition containers to the model part.
unsigned int size = 0;
for(int k=0; k<number_of_threads; k++)
{
size+=LinkingConditions[k].size();
for(ConditionsArrayType::ptr_iterator it=LinkingConditions[k].ptr_begin(); it!= LinkingConditions[k].ptr_end(); ++it )
mr_model_part.Conditions().push_back(*it);
}
std::cout<<" Number of linking conditions = " << size << std::endl;
std::cout<<" Total number of conditions = " << rConditions.size() << std::endl;
LinkingConditions.clear();
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
///Permite decidir si el nodo que no esta dentro de un elemento es el correcto para ser el slave
void VerifyCorrectSlaveNode(unsigned int& master,
unsigned int& slave,
const array_1d<NodePointerType, 2 >& Ids)
{
const unsigned int master_aux = master;
const unsigned int slave_aux = slave;
array_1d<double, 2> Distances;
array_1d<double, 2> Points0;
array_1d<double, 2> Points1;
array_1d<double, 2> Points2;
WeakPointerVector<Condition>& neighb_cond_slave = (Ids[slave_aux])->GetValue(NEIGHBOUR_CONDITIONS);
WeakPointerVector<Condition>& neighb_cond_master = (Ids[master_aux])->GetValue(NEIGHBOUR_CONDITIONS);
Segment2D rSegment;
Points0[0] = (Ids[slave_aux])->X();
Points0[1] = (Ids[slave_aux])->Y();
double distance = DBL_MAX;
double compare_distance = 0.00;
for(WeakPointerVector<Condition>::iterator neighb = neighb_cond_master.begin(); neighb!= neighb_cond_master.end(); neighb++)
{
Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
Points1[0] = geom_2[0].X();
Points1[1] = geom_2[0].Y();
Points2[0] = geom_2[1].X();
Points2[1] = geom_2[1].Y();
rSegment.AssignPointsAndComputeParameters(Points1, Points2);
compare_distance = rSegment.DistPoint2Segment2D(Points0);
if(compare_distance<distance)
{
distance = compare_distance;
}
}
Distances[0] = distance;
Points0[0] = (Ids[master_aux])->X();
Points0[1] = (Ids[master_aux])->Y();
distance = DBL_MAX;
compare_distance = 0.00;
for(WeakPointerVector<Condition>::iterator neighb = neighb_cond_slave.begin(); neighb!= neighb_cond_slave.end(); neighb++)
{
Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
Points1[0] = geom_2[0].X();
Points1[1] = geom_2[0].Y();
Points2[0] = geom_2[1].X();
Points2[1] = geom_2[1].Y();
rSegment.AssignPointsAndComputeParameters(Points1, Points2);
compare_distance = rSegment.DistPoint2Segment2D(Points0);
if(compare_distance<distance)
{
distance = compare_distance;
}
}
Distances[1] = distance;
if( Distances[1]< Distances[0])
{
master = slave_aux;
slave = master_aux;
}
}
//*****************************************************************************************************
//*****************************************************************************************************
void CreatePointLinkingConditions(
const unsigned int& master,
const unsigned int& slave,
const array_1d<NodePointerType, 2 >& Ids,
const ContactPairType& it_pair,
const PropertiesType::Pointer& tempProperties,
const unsigned int& Id,
ConditionsArrayType& LinkingConditions
)
{
KRATOS_TRY
// Slave Node
Point2D<Node<3> >::Pointer point_geom_slave = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[slave]) );
Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPointType(Id, point_geom_slave) );
// Master Node
Point2D<Node<3> >::Pointer point_geom_master = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[master]));
Condition::Pointer MasterNode = Condition::Pointer(new MasterContactPointType(Id, point_geom_master) );
Condition::GeometryType& Mgeom = MasterNode->GetGeometry();
Condition::GeometryType& Sgeom = SlaveNode ->GetGeometry();
Line2D2<Node<3> >::Pointer Lgeom = Line2D2<Node<3> >::Pointer( new Line2D2<Node<3> >(Sgeom(0), Mgeom(0) ) );
Condition::Pointer newLink = Condition::Pointer( new PointPointContactLink(Id,
Lgeom,
tempProperties,
SlaveNode,
MasterNode
) );
LinkingConditions.push_back(newLink);
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
// cuando dos objetos intersectan pero no sabes que nodo cae dentro
/// Given two intersecting objects where the penetrating node is unknown:
/// picks the globally closest (master-node, slave-node) pair by comparing
/// every master-node/slave-node distance, then appends that pair to Ids
/// (tagged no_near in Is_Near).
/// NOTE(review): the M/S lookup tables assume BOTH geometries have exactly
/// 3 nodes (position = 3*i + j); confirm callers only pass triangles.
void CheckNearNodes(
const unsigned int& master,
const unsigned int& slave,
const PointerType& SlaveObject,
const PointerType& MasterObject,
std::vector<array_1d<NodePointerType, 2 > >& Ids,
std::vector<Near_Node>& Is_Near
)
{
KRATOS_TRY
std::vector<double> Distance;
std::vector<double> Distance_aux;
std::vector<double>::iterator it;
std::vector<double>::iterator it_2;
array_1d<NodePointerType, 2 > Id;
array_1d<double, 3> vector;
const Element::GeometryType& geom_0 = MasterObject->GetGeometry();
const Element::GeometryType& geom_1 = SlaveObject->GetGeometry();
double distance = 0.00;
// M[k] / S[k] decode the flattened pair index k back to the master node index
// (row) and slave node index (column) of a 3x3 distance table.
array_1d<unsigned int, 9 > M;
array_1d<unsigned int, 9 > S;
M[0] = 0;
M[1] = 0;
M[2] = 0;
M[3] = 1;
M[4] = 1;
M[5] = 1;
M[6] = 2;
M[7] = 2;
M[8] = 2;
S[0] = 0;
S[1] = 1;
S[2] = 2;
S[3] = 0;
S[4] = 1;
S[5] = 2;
S[6] = 0;
S[7] = 1;
S[8] = 2;
// find the smallest node-to-node distance over all pairs
for(unsigned int i = 0; i<geom_0.size(); i++)
{
for(unsigned int j = 0; j<geom_1.size(); j++)
{
noalias(vector) = ( geom_0[i]-geom_1[j]) ;
distance = norm_2(vector);
Distance.push_back(distance);
}
}
const double min = (*std::min_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), min);
const int position = int(it-Distance.begin());
Id[master] = geom_0(M[position]);
Id[slave] = geom_1(S[position]);
Ids.push_back(Id);
Is_Near.push_back(no_near);
/*
//Check si dos corner chocan
std::vector<NodePointerType> nodes;
const bool test_one = VerifyToCornerIntersect(nodes, SlaveObject, MasterObject);
if( test_one==false && nodes.size()!=0)
{
KRATOS_WATCH("BBBBBBBBBBB")
if(nodes.size()==2)
{
Id[master] = nodes[0];
Id[slave] = nodes[1];
}
else
{
// si no se cumple lo anterior tomamos los nodos mas cercanos
// WARNING = Solo valido para un caso en que un solo nodo quede fuera
const double min = (*std::min_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), min);
const int position = int(it-Distance.begin());
Id[master] = geom_0(M[position]);
Id[slave] = geom_1(S[position]);
}
Ids.push_back(Id);
Is_Near.push_back(no_near);
}
//NO VALIDO PARA ELEMTOS CON MAL RATIO
else
{
Distance_aux.resize(Distance.size());
Distance_aux = Distance;
const double min = (*std::min_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), min);
const int position = int(it-Distance.begin());
Id[master] = geom_0(M[position]);
Id[slave] = geom_1(S[position]);
Ids.push_back(Id);
Is_Near.push_back(no_near);
const double min_2 = (*min_element_2(Distance_aux.begin(), Distance_aux.end(), min ) );
it_2 = std::find(Distance.begin(), Distance.end(), min_2);
const int position_2 = int(it_2-Distance.begin());
Id[master] = geom_0(M[position_2]);
Id[slave] = geom_1(S[position_2]);
Ids.push_back(Id);
Is_Near.push_back(no_near);
}
*/
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
// Saca elsegundo min de un vector
// Returns an iterator to the "second minimum" of [first, last): the smallest
// element strictly greater than 'cond' (callers pass the range minimum as
// 'cond', so elements equal to the minimum are skipped, as before).
// Falls back to 'first' when no such element exists (matching the previous
// not-found behaviour) and returns 'last' for an empty range.
// BUG FIX: the old implementation executed `*second_lowest = *first_1;`,
// writing the second minimum THROUGH the result iterator and silently
// overwriting the first element of the caller's vector (the only call site
// worked on a defensive copy precisely because of this). It also ran in
// O(n^2). This version never modifies the range and runs in O(n).
std::vector<double>::iterator min_element_2( const std::vector<double>::iterator first, const std::vector<double>::iterator last, const double& cond)
{
    if (first==last) return last;
    std::vector<double>::iterator best = last;
    for(std::vector<double>::iterator current = first; current != last; ++current)
    {
        // candidate: strictly above 'cond' and smaller than the best so far
        if(*current > cond && (best == last || *current < *best))
            best = current;
    }
    return (best == last) ? first : best;
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Detects whether two boundary objects meet corner-against-corner.
/// Intersects every slave boundary edge with every master boundary edge,
/// bucketing per slave edge the master edges it crosses.
/// Returns false (and fills Ids with the two corner nodes) only when both
/// objects have exactly two boundary conditions meeting in a shared corner
/// node; returns true otherwise.
bool VerifyToCornerIntersect( std::vector<NodePointerType>& Ids,
const PointerType& SlaveObject,
const PointerType& MasterObject
)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond_master = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
WeakPointerVector<Condition>& neighb_cond_slave = SlaveObject->GetValue(NEIGHBOUR_CONDITIONS);
// segment[k] collects the indices of master edges crossed by slave edge k
std::vector<std::vector<unsigned int> > segment;
segment.resize(neighb_cond_slave.size());
vector<array_1d<double, 2> > Points0;
vector<array_1d<double, 2> > Points1;
array_1d<double, 2> Point;
// I: master edge index; IV: slave edge index; II/III are extra guard counters
// that bound the loops by the container sizes.
unsigned int I = 0;
unsigned int II = 1;
unsigned int III = 1;
unsigned int IV = 0;
Points0.resize(2, false);
Points1.resize(2, false);
for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond_slave.end(); ++cond_slave)
{
Condition::GeometryType& geom = cond_slave->GetGeometry();
Point[0] = 0.00;
Point[1] = 0.00;
Points0(0)[0] = geom[0].X();
Points0(0)[1] = geom[0].Y();
Points0(1)[0] = geom[1].X();
Points0(1)[1] = geom[1].Y();
I = 0;
// NOTE(review): III restarts at 0 here but at 1 in the otherwise-identical
// loop of Is_Corner -- confirm the off-by-one guard is intentional.
III = 0;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); ++cond)
{
Condition::GeometryType& geom_3 = cond->GetGeometry();
Points1(0)[0] = geom_3[0].X();
Points1(0)[1] = geom_3[0].Y();
Points1(1)[0] = geom_3[1].X();
Points1(1)[1] = geom_3[1].Y();
// any non-empty intersection (point or overlap) counts as a crossing
if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
segment[IV].push_back(I);
I++;
III++;
if(III>neighb_cond_master.size())
break;
}
II++;
IV++;
if(II>neighb_cond_slave.size())
break;
}
/// Two triangles that intersect without having a node inside each other:
/// each slave edge crosses the master triangle in two places.
if(segment.size()==3)
if(segment[0].size()== 2 && segment[1].size()== 2 && segment[2].size()== 2)
return true;
// Both objects expose exactly two boundary edges: the node shared by each
// pair of edges is that object's corner node.
if(neighb_cond_master.size()==2 && neighb_cond_slave.size()==2)
{
Condition::GeometryType& geom_1 = (neighb_cond_master(0).lock())->GetGeometry();
Condition::GeometryType& geom_2 = (neighb_cond_master(1).lock())->GetGeometry();
if(geom_1[0].Id()==geom_2[0].Id())
Ids.push_back(geom_1(0));
else if(geom_1[0].Id()==geom_2[1].Id())
Ids.push_back(geom_1(0));
else if(geom_1[1].Id()==geom_2[0].Id())
Ids.push_back(geom_1(1));
else if(geom_1[1].Id()==geom_2[1].Id())
Ids.push_back(geom_1(1));
else
std::cout<< "No node A " << std::endl;
Condition::GeometryType& geom_3 = (neighb_cond_slave(0).lock())->GetGeometry();
Condition::GeometryType& geom_4 = (neighb_cond_slave(1).lock())->GetGeometry();
if(geom_3[0].Id()==geom_4[0].Id())
Ids.push_back(geom_3(0));
else if(geom_3[0].Id()==geom_4[1].Id())
Ids.push_back(geom_3(0));
else if(geom_3[1].Id()==geom_4[0].Id())
Ids.push_back(geom_3(1));
else if(geom_3[1].Id()==geom_4[1].Id())
Ids.push_back(geom_3(1));
else
std::cout<< "No node B " << std::endl;
// only when both corner nodes were identified is this a corner-corner pair
if(Ids.size()==2)
return false;
}
return true;
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Overload used when the penetrating slave node IS known: stores in
/// Ids[master] the master element node closest to SlaveNode, and SlaveNode
/// itself in Ids[slave]. Always reports no_near.
Near_Node CheckNearNodes(
    const unsigned int& master,
    const unsigned int& slave,
    const NodePointerType& SlaveNode,
    const PointerType& MasterObject,
    array_1d<NodePointerType, 2 >& Ids
)
{
    const Element::GeometryType& master_geom = MasterObject->GetGeometry();
    const array_1d<double, 3> slave_coords = SlaveNode->Coordinates();
    array_1d<double, 3> diff;
    double best = 1E10;
    // linear scan for the master node nearest to the slave node
    for(unsigned int i = 0; i < master_geom.size(); i++)
    {
        noalias(diff) = ( master_geom(i)->Coordinates() - slave_coords);
        const double d = norm_2(diff);
        if(d < best)
        {
            best = d;
            Ids[master] = master_geom(i);
        }
    }
    Ids[slave] = SlaveNode;
    return no_near;
}
/// Dispatches to the 2D or 3D linking-condition builder depending on the model
/// dimension. Only the 2D builder reports whether a link was created; the 3D
/// path always returns false.
bool CreateLinkingConditions(
    const unsigned int& Id,
    const unsigned int& master,
    const unsigned int& slave,
    const array_1d<NodePointerType, 2 >& Ids,
    const ContactPairType& it_pair,
    const PropertiesType::Pointer& tempProperties,
    Exist_Node& Exist,
    ConditionsArrayType& LinkingConditions
)
{
    if(mrdimension==2)
        return CreateLinkingConditions2D(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions);

    CreateLinkingConditions3D(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions);
    return false;
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Creates a 2D point-segment contact link for the slave node Ids[slave].
/// Behaviour by Exist:
///  - yes_nodes: locate the master segment (LocateMasterSegment); a link is
///    built only when the signed gap along the face normal indicates
///    penetration AND the node is not yet flagged IS_CONTACT_SLAVE.
///  - no_nodes : link against the master object's first neighbour condition
///    unconditionally.
/// Returns true when a PointSegmentContactLink was appended.
bool CreateLinkingConditions2D(
const unsigned int& Id,
const unsigned int& master,
const unsigned int& slave,
const array_1d<NodePointerType, 2 >& Ids,
const ContactPairType& it_pair,
const PropertiesType::Pointer& tempProperties,
Exist_Node& Exist,
ConditionsArrayType& LinkingConditions
)
{
KRATOS_TRY
Condition::Pointer MasterFace;
array_1d<double,3 > Normal_r;
array_1d<double,3 > GL;
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
if(Exist==yes_nodes)
{
const bool exist_segment = LocateMasterSegment( Ids[slave],
Ids[master],
(it_pair)[master],
(it_pair)[slave],
MasterFace,
Exist);
if(exist_segment==true)
{
const double zero = 1.00E-6;
Condition::GeometryType& geom = MasterFace->GetGeometry();
array_1d<double, 3>& point_slave = Ids[slave]->Coordinates();
// NOTE(review): non-const reference bound to GetPoint(0)'s return value;
// this relies on GetPoint returning a reference -- verify against the
// geometry API in use.
array_1d<double, 3>& point_left = geom.GetPoint(0);
array_1d<double,3> seg = geom.GetPoint(0)-geom.GetPoint(1);
noalias(GL) = point_slave - point_left;
MasterFace->Calculate(NORMAL, Normal_r, CurrentProcessInfo);
//const double distance = norm_2(seg);
//const double gat = inner_prod(GL, (1.00/distance)*seg) ;
// projection of the slave-to-face vector onto the face normal: below the
// tolerance 'zero' means the node lies behind (penetrates) the face
const double gap = inner_prod(GL, Normal_r);
// skip nodes already linked as contact slaves
bool is_repited = bool(Ids[slave]->GetValue(IS_CONTACT_SLAVE)==0);
if(gap<zero && is_repited==true) // && gat>zero && gat<distance)
{
Ids[slave]->GetValue(IS_CONTACT_SLAVE) = 1;
Point2D<Node<3> >::Pointer point_geom = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[slave]));
Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPointType(Id, point_geom) );
Condition::GeometryType& Mgeom = MasterFace->GetGeometry();
Condition::GeometryType& Sgeom = SlaveNode->GetGeometry();
//std::cout<<"Master = "<< (it_pair)[master]->Id() <<" Slave = " << (it_pair)[slave]->Id() <<std::endl;
//std::cout<<" Node (Y) = " << Ids[slave]->Id() << " Master Face = " << MasterFace->Id() << std::endl;
// link geometry: slave point + the two master segment nodes
Triangle2D3<Node<3> >::Pointer Lgeom = Triangle2D3<Node<3> >::Pointer( new Triangle2D3<Node<3> >( Sgeom(0), Mgeom(0), Mgeom(1) ) );
Condition::Pointer newLink = Condition::Pointer( new PointSegmentContactLink(Id,
Lgeom,
tempProperties,
MasterFace,
SlaveNode));
LinkingConditions.push_back( newLink );
return exist_segment;
}
}
}
else if(Exist==no_nodes)
{
//KRATOS_WATCH(Ids[slave]->Id())
Point2D<Node<3> >::Pointer point_geom = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[slave]));
Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPointType(Id, point_geom) );
//KRATOS_WATCH((it_pair)[master]->Id())
//KRATOS_WATCH((it_pair)[master]->GetValue(NEIGHBOUR_CONDITIONS).size())
// no node is inside: take the master object's first boundary condition
MasterFace = ((it_pair)[master]->GetValue(NEIGHBOUR_CONDITIONS)(0)).lock();
//std::cout<<" Master = " << (it_pair)[master]->Id() <<" Slave = " << (it_pair)[slave]->Id() <<std::endl;
//std::cout<<" Node (N) = " << Ids[slave]->Id() << " Master Face = " << MasterFace->Id() << std::endl;
Condition::GeometryType& Mgeom = MasterFace->GetGeometry();
Condition::GeometryType& Sgeom = SlaveNode->GetGeometry();
Triangle2D3<Node<3> >::Pointer Lgeom = Triangle2D3<Node<3> >::Pointer( new Triangle2D3<Node<3> >( Sgeom(0), Mgeom(0), Mgeom(1) ) );
Condition::Pointer newLink = Condition::Pointer( new PointSegmentContactLink(Id,
Lgeom,
tempProperties,
MasterFace,
SlaveNode));
LinkingConditions.push_back( newLink );
return true;
}
else
return false;
return false;
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Creates a 3D contact link for the slave node Ids[slave] against the master
/// face previously stored in its CONTACT_LINK_MASTER variable. The link
/// geometry is a tetrahedron: the slave point plus the three master face nodes.
/// NOTE(review): master, slave, it_pair and Exist are unused here; the master
/// face comes solely from CONTACT_LINK_MASTER -- confirm it is always set
/// before this is called.
void CreateLinkingConditions3D(
const unsigned int& Id,
const unsigned int& master,
const unsigned int& slave,
const array_1d<NodePointerType, 2 >& Ids,
const ContactPairType& it_pair,
const PropertiesType::Pointer& tempProperties,
Exist_Node& Exist,
ConditionsArrayType& LinkingConditions
)
{
KRATOS_TRY
//bool is_repited = bool(Ids[slave]->GetValue(IS_CONTACT_SLAVE)==0)
//if(is_repited==true)
{
// local contact points are passed default-initialized to the link condition
Point MasterContactLocalPoint;
Point SlaveContactLocalPoint;
int SlaveIntegrationPointIndex = 0;
Condition::Pointer MasterFace = (Ids[slave])->GetValue(CONTACT_LINK_MASTER);
Point3D<Node<3> >::Pointer point_geom = Point3D<Node<3> >::Pointer( new Point3D<Node<3> >(Ids[slave]));
Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPoint3D(Id, point_geom) );
Condition::GeometryType& Mgeom = MasterFace->GetGeometry();
Condition::GeometryType& Sgeom = SlaveNode->GetGeometry();
Tetrahedra3D4<Node<3> >::Pointer Lgeom = Tetrahedra3D4<Node<3> >::Pointer( new Tetrahedra3D4<Node<3> >( Sgeom(0), Mgeom(0), Mgeom(1), Mgeom(2) ) );
Condition::Pointer newLink = Condition::Pointer( new ContactLink3DExplicit(
Id,
Lgeom,
tempProperties,
MasterFace,
SlaveNode,
MasterContactLocalPoint,
SlaveContactLocalPoint,
SlaveIntegrationPointIndex
));
LinkingConditions.push_back( newLink );
}
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Finds the master boundary segment (condition) the slave node should be
/// linked to, returned through MasterFace.
/// Strategy depends on Exist:
///  - yes_nodes: the slave node lies inside the master object; a cascade of
///    geometric tests is tried (Test_Four, then corner handling via
///    Test_One_C / Test_Three / Test_One_B_Distances), falling back to the
///    node's stored CONTACT_LINK_MASTER.
///  - no_nodes : trajectory/intersection tests are tried in order
///    (Test_One_C, Test_Two, Test_Four, Test_One_A) with the same fallback.
/// Returns true whenever a MasterFace is chosen; false only when the switch
/// falls through with a non-empty master neighbour list.
bool LocateMasterSegment( const NodePointerType& SlaveNode,
const NodePointerType& MasterNode, //the most near
const PointerType& MasterObject,
const PointerType& SlaveObject,
Condition::Pointer& MasterFace,
Exist_Node& Exist
)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
if(neighb_cond.size()!=0)
{
switch(Exist)
{
case(yes_nodes):
{
// single candidate face: cross-check against the stored master link
if(neighb_cond.size()==1)
{
Condition::Pointer rCond_2;
Condition::Pointer& rCond_1 = SlaveNode->GetValue(CONTACT_LINK_MASTER);
// problem: internal elements have no master face
//KRATOS_WATCH(SlaveNode->Id())
//KRATOS_WATCH(rCond_1->Id())
const bool test_2 = Test_Four(SlaveNode, MasterObject, rCond_2);
//KRATOS_WATCH(MasterObject->Id())
//KRATOS_WATCH(SlaveNode->Id())
//KRATOS_WATCH(rCond_1->Id())
const bool test_3 = bool(neighb_cond(0).lock()->Id()==rCond_1->Id());
//KRATOS_WATCH("-----------------")
if(test_2==true)
{
// prefer the stored link when both tests agree on the same face
if(rCond_1->Id()==rCond_2->Id())
MasterFace = rCond_1;
else
MasterFace = rCond_2; ///WARNING
}
else if( test_3==true)
{
MasterFace = rCond_1;
}
else if(neighb_cond.size()!=0)
MasterFace = neighb_cond(0).lock();
return true;
}
// several candidate faces: corner cases first, then distance-based pick
if(neighb_cond.size()>=2)
{
Condition::Pointer& rCond_1 = SlaveNode->GetValue(CONTACT_LINK_MASTER);
const bool is_corner = Is_Corner(SlaveNode, MasterObject);
if(is_corner==true)
{
Test_One_B_Distances(SlaveNode, MasterObject, rCond_1);
MasterFace = rCond_1;
return true;
}
const bool node_corner = Is_Node_Corner(SlaveNode, SlaveObject);
if(node_corner==true)
{
Condition::Pointer rCond_2;
Condition::Pointer rCond_3;
Condition::Pointer rCond_4;
const bool test_1 = Test_One_C (SlaveNode, MasterNode, rCond_2); //true==1
if(test_1==true)
{
if(rCond_2->Id()==rCond_1->Id())
MasterFace = rCond_1;
else
MasterFace = rCond_2; ///WARNING
return true;
}
//if(SlaveNode->Id()==29)
//std::cout<<"Test 1 " <<std::endl;
const bool test_2 = Test_Three (SlaveNode, MasterObject, rCond_3);
if(test_2==true)
{
if(rCond_3->Id()==rCond_1->Id())
MasterFace = rCond_1;
else
MasterFace = rCond_3;
return true;
}
//if(SlaveNode->Id()==29)
//std::cout<<"Test 2 " <<std::endl;
/*
if(SlaveNode->Id()==51)
std::cout<<"Test 3 " <<std::endl;
const bool test_3 = Test_Five (SlaveNode, MasterObject, rCond_4);
if(test_3==true){
if(rCond_4->Id()==rCond_1->Id())
MasterFace = rCond_1;
else
MasterFace = rCond_4; ///WARNING rCond_3;
return true;
}
*/
}
//std::cout<<" No test " <<std::endl;
// generic fallback: closest face by point-to-segment distance
Condition::Pointer rCond_2;
const bool test_4 = Test_One_B_Distances(SlaveNode, MasterObject, rCond_2);
if(test_4==true)
{
if(rCond_2->Id()==rCond_1->Id())
MasterFace = rCond_1;
else
MasterFace = rCond_2;
return true;
}
MasterFace = rCond_1;
return true;
}
break;
}
case(no_nodes):
{
//Condition::Pointer& rCond_1 = SlaveNode->GetValue(CONTACT_LINK_MASTER);
//Condition::Pointer rCond_2;
//Condition::Pointer rCond_3;
//Condition::Pointer rCond_4;
//const bool test_1 = Test_One_C(SlaveNode, MasterNode, rCond_2);
//const bool test_2 = Test_Two(SlaveNode, MasterObject, MasterFace)
//const bool test_3 = Test_Four(SlaveNode, MasterObject, MasterFace);
//const bool test_4 = Test_One_A(SlaveNode, MasterNode, MasterFace);
// if(test_1==true){
// if(rCond_2->Id()==rCond_1->Id())
// MasterFace = rCond_1;
// else
// MasterFace = rCond_2;
// return true;
// }
// ordered cascade: first test that identifies a face wins
if(Test_One_C(SlaveNode, MasterNode, MasterFace))
{
return true;
}
if(Test_Two(SlaveNode, MasterObject, MasterFace))
{
return true;
}
if(Test_Four(SlaveNode, MasterObject, MasterFace) )
{
return true;
}
if(Test_One_A(SlaveNode, MasterNode, MasterFace))
{
return true;
}
MasterFace = SlaveNode->GetValue(CONTACT_LINK_MASTER);
return true;
break;
}
}
return false;
}
MasterFace = SlaveNode->GetValue(CONTACT_LINK_MASTER); // was not here originally: if this misbehaves, comment out and return false
return true; // false
KRATOS_CATCH("")
}
// Si el nodo esta dentro de elemento
/// Returns true when the slave node sits on a corner of the master object,
/// i.e. when the slave node's boundary edges intersect at least two DIFFERENT
/// master boundary edges (duplicate master-edge hits are counted once).
bool Is_Corner(
const NodePointerType& SlaveNode,
const PointerType& MasterObject)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond_master = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS);
// indices of distinct master edges hit by any slave edge
std::vector<unsigned int> segment;
std::vector<unsigned int>::iterator it;
vector<array_1d<double, 2> > Points0;
vector<array_1d<double, 2> > Points1;
array_1d<double, 2> Point;
// I: master edge index; II/III are guard counters bounding the loops
unsigned int I = 0;
unsigned int II = 1;
unsigned int III = 1;
Points0.resize(2, false);
Points1.resize(2, false);
for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond_slave.end(); ++cond_slave)
{
Condition::GeometryType& geom = cond_slave->GetGeometry();
Point[0] = 0.00;
Point[1] = 0.00;
Points0(0)[0] = geom[0].X();
Points0(0)[1] = geom[0].Y();
Points0(1)[0] = geom[1].X();
Points0(1)[1] = geom[1].Y();
I = 0;
III = 1;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); ++cond)
{
Condition::GeometryType& geom_3 = cond->GetGeometry();
Points1(0)[0] = geom_3[0].X();
Points1(0)[1] = geom_3[0].Y();
Points1(1)[0] = geom_3[1].X();
Points1(1)[1] = geom_3[1].Y();
if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
{
// record each intersected master edge at most once
if(segment.size()==0)
{
segment.push_back(I);
}
else
{
it = std::find(segment.begin(), segment.end(), I);
if(it==segment.end())
segment.push_back(I);
}
}
I++;
III++;
if(III>neighb_cond_master.size())
break;
}
II++;
if(II>neighb_cond_slave.size())
break;
}
if(segment.size()>=2)
return true;
return false;
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
///verifica si un nodo es corner
bool Is_Node_Corner(const NodePointerType& SlaveNode,
const PointerType& SlaveObject
)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond_node = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS);
WeakPointerVector<Condition>& neighb_cond_object = SlaveObject->GetValue(NEIGHBOUR_CONDITIONS);
unsigned int count = 0;
///WARNING = Verificar para tres lados
if(neighb_cond_object.size()>=2)
for(unsigned int i = 0; i<neighb_cond_node.size(); i++)
for(unsigned int j = 0; j<neighb_cond_object.size(); j++)
if((neighb_cond_node(i).lock())->Id()==(neighb_cond_object(j).lock())->Id())
count++;
if(count==2)
return true;
return false;
KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
/// comparacion con desplazamientos
/// Traces the slave node trajectory (reference position + displacement from 3
/// solution steps back, to the current position) and intersects it with the
/// master node's boundary edges. On success rCond is the crossed edge; with
/// several point-crossings, the edge whose intersection point lies FARTHEST
/// from the current position is chosen.
/// NOTE(review): Test_One_B keeps the NEAREST crossing instead -- confirm the
/// max-distance choice here is intentional.
bool Test_One_A( const NodePointerType& SlaveNode,
const NodePointerType& MasterNode,
Condition::Pointer& rCond)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond_master = MasterNode->GetValue(NEIGHBOUR_CONDITIONS);
if(neighb_cond_master.size()!=0)
{
std::vector<unsigned int> segment;
unsigned int I = 0;
unsigned int segmento = 0;
std::vector<array_1d<double, 2> > Points; // intersection points of the trajectory with master edges
vector<array_1d<double, 2> > Points0;
vector<array_1d<double, 2> > Points1;
array_1d<double, 2> Point;
array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
Points0.resize(2, false);
Points1.resize(2, false);
// trajectory segment: old position -> current position
Points0(0)[0] = SlaveNode->X0() + old_pos[0];
Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
Points0(1)[0] = SlaveNode->X();
Points0(1)[1] = SlaveNode->Y();
unsigned int JJ = 1;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); cond++)
{
Condition::GeometryType& geom_2 = cond->GetGeometry();
Points1(0)[0] = geom_2[0].X();
Points1(0)[1] = geom_2[0].Y();
Points1(1)[0] = geom_2[1].X();
Points1(1)[1] = geom_2[1].Y();
// only single-point crossings are accepted here
if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT)
{
Points.push_back(Point);
segment.push_back(I);
}
I++;
JJ++;
if(JJ>neighb_cond_master.size())
break;
}
if (Points.size()!=0)
{
if (Points.size()==1)
{
segmento = segment[0];
}
else if (Points.size()>1)
{
double dist0 = 0;
array_1d<double, 2> rect;
std::vector<double> Distance;
std::vector<double>::iterator it;
int position = 0;
// distance of each crossing from the current node position
for(unsigned int i = 0; i<Points.size(); i++)
{
rect = Points0[1] - Points[i];
dist0 = std::sqrt(inner_prod(rect, rect ));
Distance.push_back(dist0);
}
const double max = (*std::max_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), max);
position = int(it-Distance.begin());
segmento = segment[position];
}
rCond = neighb_cond_master(segmento).lock();
return true;
}
}
return false;
KRATOS_CATCH("")
}
/// Intersects the slave node's boundary edges with the master node's boundary
/// edges. On success rCond receives the crossed master condition; when several
/// point-crossings exist, the one whose intersection point lies farthest from
/// the slave node's current position is kept.
/// Returns true when a master segment was found, false otherwise.
bool Test_One_C( const NodePointerType& SlaveNode,
                 const NodePointerType& MasterNode,
                 Condition::Pointer& rCond)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond = MasterNode->GetValue(NEIGHBOUR_CONDITIONS);
    WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS);
    if(neighb_cond.size()!=0 && neighb_cond_slave.size()!=0)
    {
        std::vector<unsigned int> segment;
        std::vector<array_1d<double, 2> > Points; // intersection points found
        vector<array_1d<double, 2> > Points0;
        vector<array_1d<double, 2> > Points1;
        array_1d<double, 2> Point;
        // test with edges
        unsigned int segmento = 0;
        unsigned int I = 0;
        unsigned int II = 1;
        unsigned int III = 1;
        Points0.resize(2, false);
        Points1.resize(2, false);
        // BUG FIX: the outer loop previously compared cond_slave against
        // neighb_cond.end() -- the end() of the MASTER container. Comparing
        // iterators of different containers is undefined behaviour; the loop
        // must iterate over the slave's own neighbour conditions.
        for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond_slave.end(); ++cond_slave)
        {
            Condition::GeometryType& geom = cond_slave->GetGeometry();
            Point[0] = 0.00;
            Point[1] = 0.00;
            Points0(0)[0] = geom[0].X();
            Points0(0)[1] = geom[0].Y();
            Points0(1)[0] = geom[1].X();
            Points0(1)[1] = geom[1].Y();
            I = 0;
            III = 1;
            for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); ++cond)
            {
                Condition::GeometryType& geom_3 = cond->GetGeometry();
                Points1(0)[0] = geom_3[0].X();
                Points1(0)[1] = geom_3[0].Y();
                Points1(1)[0] = geom_3[1].X();
                Points1(1)[1] = geom_3[1].Y();
                // only single-point crossings are accepted
                if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT)
                {
                    Points.push_back(Point);
                    segment.push_back(I);
                }
                I++;
                III++;
                if(III>neighb_cond.size())
                    break;
            }
            II++;
            if(II>neighb_cond_slave.size())
                break;
        }
        if (Points.size()!=0)
        {
            if (Points.size()==1)
            {
                segmento = segment[0];
            }
            // node outside intersecting two (or more) edges: keep the crossing
            // farthest from the slave node's current position
            else if (Points.size()>1)
            {
                Points0(0)[0] = SlaveNode->X();
                Points0(0)[1] = SlaveNode->Y();
                Points0(1)[0] = SlaveNode->X();
                Points0(1)[1] = SlaveNode->Y();
                double dist0 = 0.00;
                array_1d<double, 2> rect;
                std::vector<double> Distance;
                std::vector<double>::iterator it;
                int position = 0;
                for(unsigned int i = 0; i<Points.size(); i++)
                {
                    rect = Points0[1] - Points[i];
                    dist0 = std::sqrt(inner_prod(rect, rect ));
                    Distance.push_back(dist0);
                }
                const double max = (*std::max_element(Distance.begin(), Distance.end() ) );
                it = std::find(Distance.begin(), Distance.end(), max);
                position = int(it-Distance.begin());
                segmento = segment[position];
            }
            rCond = neighb_cond(segmento).lock();
            return true;
        }
    }
    return false;
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Busca la interseccion de la trayectoria del nodo esclavo
/// desde su tiempo actual hasta 3 paso atras con las aristas del
/// elemento master. La menor de todas es el segmento de contacto.
/// Same trajectory test as Test_One_A (old position from the displacement 3
/// solution steps back, to the current position) but it accepts any non-empty
/// intersection (!=IT_EMPTY, not only single-point crossings) and, with
/// several crossings, keeps the one CLOSEST to the current position.
bool Test_One_B(const NodePointerType& SlaveNode,
const NodePointerType& MasterNode,
Condition::Pointer& rCond)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond_master = MasterNode->GetValue(NEIGHBOUR_CONDITIONS);
if(neighb_cond_master.size()!=0 )
{
std::vector<unsigned int> segment;
unsigned int segmento = 0;
unsigned int I = 0;
std::vector<array_1d<double, 2> > Points; // intersection points of the trajectory with master edges
vector<array_1d<double, 2> > Points0;
vector<array_1d<double, 2> > Points1;
array_1d<double, 2> Point;
array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
Points0.resize(2, false);
Points1.resize(2, false);
// trajectory segment: old position -> current position
Points0(0)[0] = SlaveNode->X0() + old_pos[0];
Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
Points0(1)[0] = SlaveNode->X();
Points0(1)[1] = SlaveNode->Y();
/*
array_1d<double, 2> Dir;
Dir[0] = Points0(1)[0] - Points0(0)[0];
Dir[1] = Points0(1)[1] - Points0(0)[1];
noalias(Dir) = (1.00/(sqrt(inner_prod(Dir, Dir)))) * Dir;
Points0(0)[0] -= Dir[0];
Points0(0)[1] -= Dir[1];
Points0(1)[0] += Dir[0];
Points0(1)[1] += Dir[1];
*/
unsigned int JJ = 1;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); cond++)
{
Condition::GeometryType& geom_2 = cond->GetGeometry();
Points1(0)[0] = geom_2[0].X();
Points1(0)[1] = geom_2[0].Y();
Points1(1)[0] = geom_2[1].X();
Points1(1)[1] = geom_2[1].Y();
if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
{
Points.push_back(Point);
segment.push_back(I);
}
I++;
JJ++;
if(JJ>neighb_cond_master.size())
break;
}
if (Points.size()!=0)
{
if (Points.size()==1)
{
segmento = segment[0];
}
else if (Points.size()>1)
{
double dist0 = 0.00;
array_1d<double, 2> rect;
std::vector<double> Distance;
std::vector<double>::iterator it;
int position = 0;
// distance of each crossing from the current node position
for(unsigned int i = 0; i<Points.size(); i++)
{
rect = Points0[1] - Points[i];
dist0 = std::sqrt(inner_prod(rect, rect ));
Distance.push_back(dist0);
}
const double min = (*std::min_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), min);
position = int(it-Distance.begin());
segmento = segment[position];
}
rCond = neighb_cond_master( segmento).lock();
return true;
}
}
return false;
KRATOS_CATCH("")
}
//Calculando distancias de puntos a segmentos
/// Chooses the master boundary segment closest (point-to-segment distance) to
/// the slave node's current position; rCond receives it. Succeeds whenever the
/// master object has at least one neighbour condition.
bool Test_One_B_Distances( const NodePointerType& SlaveNode,
const PointerType& MasterObject,
Condition::Pointer& rCond)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond_master = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
std::vector<unsigned int> segment;
unsigned int I = 0;
unsigned int segmento = 0;
std::vector<double> Distances; // distance from the slave node to each master segment
array_1d<double, 2> Points0;
vector<array_1d<double, 2> > Points1;
Points1.resize(2, false);
Points0[0] = SlaveNode->X();
Points0[1] = SlaveNode->Y();
Segment2D Segment1;
unsigned int JJ = 1;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); cond++)
{
Condition::GeometryType& geom_2 = cond->GetGeometry();
Points1(0)[0] = geom_2[0].X();
Points1(0)[1] = geom_2[0].Y();
Points1(1)[0] = geom_2[1].X();
Points1(1)[1] = geom_2[1].Y();
Segment1.AssignPointsAndComputeParameters(Points1[0], Points1[1]);
Distances.push_back(Segment1.DistPoint2Segment2D(Points0));
segment.push_back(I);
I++;
JJ++;
if(JJ>neighb_cond_master.size())
break;
}
if (Distances.size()!=0)
{
if (Distances.size()==1)
{
segmento = segment[0];
}
else if (Distances.size()>1)
{
// pick the segment at minimum distance
std::vector<double>::iterator it;
int position = 0;
const double min = (*std::min_element(Distances.begin(), Distances.end() ) );
it = std::find(Distances.begin(), Distances.end(), min);
position = int(it-Distances.begin());
segmento = segment[position];
}
rCond = neighb_cond_master( segmento).lock();
return true;
}
return false;
KRATOS_CATCH("")
}
/// For near nodes:
/// case where the edges lie outside.
bool Test_Two( const NodePointerType& SlaveNode,
const PointerType& MasterObject,
Condition::Pointer& rCond)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
if(neighb_cond.size()!=0 )
{
std::vector<unsigned int> segment;
unsigned int I = 0;
unsigned int segmento = 0;
std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento
vector<array_1d<double, 2> > Points0;
vector<array_1d<double, 2> > Points1;
array_1d<double, 2> Point;
array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
Points0.resize(2, false);
Points1.resize(2, false);
Points0(0)[0] = SlaveNode->X0() + old_pos[0];
Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
Points0(1)[0] = SlaveNode->X();
Points0(1)[1] = SlaveNode->Y();
unsigned int JJ = 1;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); cond++)
{
Condition::GeometryType& geom_2 = cond->GetGeometry();
Points1(0)[0] = geom_2[0].X();
Points1(0)[1] = geom_2[0].Y();
Points1(1)[0] = geom_2[1].X();
Points1(1)[1] = geom_2[1].Y();
if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT)
{
Points.push_back(Point);
segment.push_back(I);
}
I++;
JJ++;
if(JJ>neighb_cond.size())
break;
}
if (Points.size()!=0)
{
if (Points.size()==1)
{
segmento = segment[0];
}
else if (Points.size()>1)
{
double dist0 = 0.00;
array_1d<double, 2> rect;
std::vector<double> Distance;
std::vector<double>::iterator it;
int position = 0;
for(unsigned int i = 0; i<Points.size(); i++)
{
rect = Points0[1] - Points[i];
dist0 = std::sqrt(inner_prod(rect, rect ));
Distance.push_back(dist0);
}
const double max = (*std::max_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), max);
position = int(it-Distance.begin());
segmento = segment[position];
}
rCond = neighb_cond(segmento).lock();
return true;
}
}
return false;
KRATOS_CATCH("")
}
/// Finds the intersection of the slave node trajectory with the element edges.
/// Unlike Test_One_B, this works against the elements, not against their master node.
bool Test_Three(const NodePointerType& SlaveNode,
const PointerType& MasterObject,
Condition::Pointer& rCond)
{
KRATOS_TRY
WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
if(neighb_cond.size()!=0 )
{
std::vector<unsigned int> segment;
unsigned int I = 0;
unsigned int segmento = 0;
std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento
vector<array_1d<double, 2> > Points0;
vector<array_1d<double, 2> > Points1;
array_1d<double, 2> Point;
array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
Points0.resize(2, false);
Points1.resize(2, false);
Points0(0)[0] = SlaveNode->X0() + old_pos[0];
Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
Points0(1)[0] = SlaveNode->X();
Points0(1)[1] = SlaveNode->Y();
unsigned int JJ = 1;
for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); cond++)
{
Condition::GeometryType& geom_2 = cond->GetGeometry();
Points1(0)[0] = geom_2[0].X();
Points1(0)[1] = geom_2[0].Y();
Points1(1)[0] = geom_2[1].X();
Points1(1)[1] = geom_2[1].Y();
if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
{
Points.push_back(Point);
segment.push_back(I);
}
I++;
JJ++;
if(JJ>neighb_cond.size())
break;
}
if (Points.size()!=0)
{
if (Points.size()==1)
{
segmento = segment[0];
}
else if (Points.size()>1)
{
double dist0 = 0.00;
array_1d<double, 2> rect;
std::vector<double> Distance;
std::vector<double>::iterator it;
int position = 0;
for(unsigned int i = 0; i<Points.size(); i++)
{
rect = Points0[1] - Points[i];
dist0 = std::sqrt(inner_prod(rect, rect ));
Distance.push_back(dist0);
}
const double min = (*std::min_element(Distance.begin(), Distance.end() ) );
it = std::find(Distance.begin(), Distance.end(), min);
position = int(it-Distance.begin());
segmento = segment[position];
}
rCond = neighb_cond(segmento).lock();
return true;
}
}
return false;
KRATOS_CATCH("")
}
/// For near nodes:
/// case where the edges lie outside an element.
/// For near nodes: case where the edges lie outside an element.
/// Intersects every edge (condition) attached to the slave node with every
/// edge of the master object, accepting only proper point intersections.
/// When intersections exist, the master segment whose intersection point lies
/// FARTHEST from the current slave position is returned through rCond.
/// Returns true iff a segment was selected.
bool Test_Four( const NodePointerType& SlaveNode,
                const PointerType& MasterObject,
                Condition::Pointer& rCond)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
    WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS);
    if(neighb_cond.size()!=0 && neighb_cond_slave.size()!=0)
    {
        array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
        std::vector<unsigned int> segment;            // master segment index of each intersection
        std::vector<array_1d<double, 2> > Points;     // intersection points found
        vector<array_1d<double, 2> > Points0;         // current slave edge (later reused for the trajectory)
        vector<array_1d<double, 2> > Points1;         // current master edge
        array_1d<double, 2> Point;
        unsigned int segmento = 0;
        unsigned int I = 0;
        Points0.resize(2, false);
        Points1.resize(2, false);
        // BUGFIX: this loop previously compared cond_slave against
        // neighb_cond.end() — the end iterator of a DIFFERENT container
        // (undefined behaviour, only masked by auxiliary break counters).
        // It now uses neighb_cond_slave.end(), making the counters unnecessary.
        for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond_slave.end(); ++cond_slave)
        {
            Condition::GeometryType& geom = cond_slave->GetGeometry();
            Point[0] = 0.00;
            Point[1] = 0.00;
            Points0(0)[0] = geom[0].X();
            Points0(0)[1] = geom[0].Y();
            Points0(1)[0] = geom[1].X();
            Points0(1)[1] = geom[1].Y();
            I = 0;
            for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); ++cond)
            {
                Condition::GeometryType& geom_3 = cond->GetGeometry();
                Points1(0)[0] = geom_3[0].X();
                Points1(0)[1] = geom_3[0].Y();
                Points1(1)[0] = geom_3[1].X();
                Points1(1)[1] = geom_3[1].Y();
                if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT)
                {
                    Points.push_back(Point);
                    segment.push_back(I);
                }
                I++;
            }
        }
        if (Points.size()!=0)
        {
            if (Points.size()==1)
            {
                segmento = segment[0];
            }
            // node outside, intersecting several edges: keep the farthest one
            else
            {
                // reuse Points0 for the trajectory: previous -> current position
                Points0(0)[0] = SlaveNode->X0() + old_pos[0];
                Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
                Points0(1)[0] = SlaveNode->X();
                Points0(1)[1] = SlaveNode->Y();
                std::vector<double> Distance;
                array_1d<double, 2> rect;
                for(unsigned int i = 0; i<Points.size(); i++)
                {
                    rect = Points0[1] - Points[i];
                    Distance.push_back(std::sqrt(inner_prod(rect, rect)));
                }
                const unsigned int position = (unsigned int)(std::max_element(Distance.begin(), Distance.end()) - Distance.begin());
                segmento = segment[position];
            }
            rCond = neighb_cond(segmento).lock();
            return true;
        }
    }
    return false;
    KRATOS_CATCH("")
}
/// Finds the intersections of the slave node's edges with the master node's edges.
/// Intersects the edges of the slave node's conditions with the edges of the
/// master object, accepting any non-empty intersection (!= IT_EMPTY).
/// When intersections exist, the master segment whose intersection point lies
/// CLOSEST to the current slave position is returned through rCond.
/// Returns true iff a segment was selected.
bool Test_Five( const NodePointerType& SlaveNode,
                const PointerType& MasterObject,
                Condition::Pointer& rCond)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
    WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS);
    if(neighb_cond.size()!=0 && neighb_cond_slave.size()!=0)
    {
        array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
        std::vector<unsigned int> segment;            // master segment index of each intersection
        std::vector<array_1d<double, 2> > Points;     // intersection points found
        vector<array_1d<double, 2> > Points0;         // current slave edge (later reused for the trajectory)
        vector<array_1d<double, 2> > Points1;         // current master edge
        array_1d<double, 2> Point;
        unsigned int segmento = 0;
        unsigned int I = 0;
        Points0.resize(2, false);
        Points1.resize(2, false);
        // BUGFIX: this loop previously compared cond_slave against
        // neighb_cond.end() — the end iterator of a DIFFERENT container
        // (undefined behaviour, only masked by auxiliary break counters).
        // It now uses neighb_cond_slave.end(), making the counters unnecessary.
        for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond_slave.end(); ++cond_slave)
        {
            Condition::GeometryType& geom = cond_slave->GetGeometry();
            Point[0] = 0.00;
            Point[1] = 0.00;
            Points0(0)[0] = geom[0].X();
            Points0(0)[1] = geom[0].Y();
            Points0(1)[0] = geom[1].X();
            Points0(1)[1] = geom[1].Y();
            I = 0;
            for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); ++cond)
            {
                Condition::GeometryType& geom_3 = cond->GetGeometry();
                Points1(0)[0] = geom_3[0].X();
                Points1(0)[1] = geom_3[0].Y();
                Points1(1)[0] = geom_3[1].X();
                Points1(1)[1] = geom_3[1].Y();
                if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
                {
                    Points.push_back(Point);
                    segment.push_back(I);
                }
                I++;
            }
        }
        if (Points.size()!=0)
        {
            if (Points.size()==1)
            {
                segmento = segment[0];
            }
            // node outside, intersecting several edges: keep the closest one
            else
            {
                // reuse Points0 for the trajectory: previous -> current position
                Points0(0)[0] = SlaveNode->X0() + old_pos[0];
                Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
                Points0(1)[0] = SlaveNode->X();
                Points0(1)[1] = SlaveNode->Y();
                std::vector<double> Distance;
                array_1d<double, 2> rect;
                for(unsigned int i = 0; i<Points.size(); i++)
                {
                    rect = Points0[1] - Points[i];
                    Distance.push_back(std::sqrt(inner_prod(rect, rect)));
                }
                const unsigned int position = (unsigned int)(std::min_element(Distance.begin(), Distance.end()) - Distance.begin());
                segmento = segment[position];
            }
            rCond = neighb_cond(segmento).lock();
            return true;
        }
    }
    return false;
    KRATOS_CATCH("")
}
//************************************************************************************
//************************************************************************************
/// Builds the 2D boundary contour.  A triangle face is a contour edge when
/// the neighbour element stored for that face is the element itself (the
/// convention used here when a face has no real neighbour).  Each contour
/// edge produces a master condition via CreateMasterConditions2D, its nodes
/// and the owning element are flagged IS_BOUNDARY, and finally every element
/// touching a boundary node is collected into mBoundaryElements.
void CalculateBoundaryContour2D(ConditionsArrayType& MasterConditions)
{
    KRATOS_TRY
    std::cout<<" CALCULATING CONTOURS 2D" << std::endl;
    typedef WeakPointerVector< Element >::iterator ElementIteratorType;
    ContainerType& rElements = mr_model_part.ElementsArray();
    ConditionsArrayType& rConditions = mr_model_part.Conditions();
    IteratorType it_begin = rElements.begin();
    IteratorType it_end = rElements.end();
    array_1d<NodePointerType,2> Pair;           // the two nodes of the current contour edge
    unsigned int face = 0;                      // local face index within the neighbour loop
    unsigned int Id = rConditions.size() + 1 ;  // next free condition id
    bool is_repited = false;                    // element already registered as boundary?
    for(IteratorType elem = it_begin; elem!=it_end; elem++)
    {
        Element::GeometryType& geom_1 = (*elem)->GetGeometry();
        WeakPointerVector< Element >& neighb_elems = (*elem)->GetValue(NEIGHBOUR_ELEMENTS);
        // The neighbour list may contain the element itself when a face has
        // fewer than 3 real neighbours: that marks a boundary face.
        for( ElementIteratorType neighb_elem = neighb_elems.begin(); neighb_elem!= neighb_elems.end(); neighb_elem++)
        {
            if (neighb_elem->Id() == (*elem)->Id() )
            {
                if(face == 0) // edge 1-2
                {
                    Pair[0] = geom_1(1);
                    Pair[1] = geom_1(2);
                    CreateMasterConditions2D(Pair, elem, Id, MasterConditions);
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if (face==1) // edge 2-0
                {
                    Pair[0] = geom_1(2);
                    Pair[1] = geom_1(0);
                    CreateMasterConditions2D(Pair, elem, Id, MasterConditions);
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if (face==2) // edge 0-1
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(1);
                    CreateMasterConditions2D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(is_repited==false)
                {
                    (*elem)->GetValue(IS_BOUNDARY) = 1;
                    mBoundaryElements.push_back(*elem);
                    is_repited = true;
                }
            }
            face++;
        }
        face = 0;
        is_repited = false;
    }
    // Collect boundary nodes, and also mark/collect elements adjacent to a
    // boundary node that were not flagged in the loop above.
    unsigned int I = 0;
    NodesContainerType& rNodes = mr_model_part.NodesArray();
    for(NodesIteratorType inode = rNodes.begin(); inode!=rNodes.end(); ++inode)
    {
        if((*inode)->GetValue(IS_BOUNDARY) == 1)
        {
            mBoundaryNodes.push_back(*inode);
            WeakPointerVector<Element>& neighb_elems = (*inode)->GetValue(NEIGHBOUR_ELEMENTS);
            I = 0;
            for( ElementIteratorType neighb_elem = neighb_elems.begin(); neighb_elem!= neighb_elems.end(); ++neighb_elem)
            {
                if(neighb_elem->GetValue(IS_BOUNDARY)!=1)
                {
                    neighb_elem->GetValue(IS_BOUNDARY)=1;
                    mBoundaryElements.push_back(neighb_elems(I).lock());
                }
                I++;
            }
        }
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Builds the 3D boundary contour: a tetrahedron face is on the contour when
/// the neighbour element stored for that face is the element itself.  Each
/// contour face produces a master condition via CreateMasterConditions3D and
/// its nodes/owning element are flagged IS_BOUNDARY; elements adjacent to a
/// boundary node are then also collected into mBoundaryElements.
/// NOTE(review): unlike the 2D version, boundary nodes are NOT appended to
/// mBoundaryNodes here — confirm whether that is intentional.
void CalculateBoundaryContour3D(ConditionsArrayType& MasterConditions)
{
    KRATOS_TRY
    std::cout<<"CALCULATING CONTOURS 3D"<< std::endl;
    typedef WeakPointerVector< Element >::iterator ElementIteratorType;
    ContainerType& rElements = mr_model_part.ElementsArray();
    ConditionsArrayType& rConditions = mr_model_part.Conditions();
    IteratorType it_begin = rElements.begin();
    IteratorType it_end = rElements.end();
    array_1d<NodePointerType,3> Pair;           // the three nodes of the current contour face
    unsigned int face = 0;                      // local face index within the neighbour loop
    unsigned int Id = rConditions.size() + 1 ;  // next free condition id
    bool is_repited = false;                    // element already registered as boundary?
    for(IteratorType elem = it_begin; elem!=it_end; elem++)
    {
        Element::GeometryType& geom_1 = (*elem)->GetGeometry();
        WeakPointerVector< Element >& neighb_elems = (*elem)->GetValue(NEIGHBOUR_ELEMENTS);
        for( ElementIteratorType neighb_elem = neighb_elems.begin(); neighb_elem!= neighb_elems.end(); neighb_elem++)
        {
            if(neighb_elem->Id() == (*elem)->Id() )
            {
                if(face == 0) // face 1-2-3
                {
                    Pair[0] = geom_1(1);
                    Pair[1] = geom_1(2);
                    Pair[2] = geom_1(3);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    geom_1[3].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(face ==1) // face 0-3-2
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(3);
                    Pair[2] = geom_1(2);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[3].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(face == 2) // face 0-1-3
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(1);
                    Pair[2] = geom_1(3);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    geom_1[3].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(face == 3) // face 0-2-1
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(2);
                    Pair[2] = geom_1(1);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(is_repited==false)
                {
                    (*elem)->GetValue(IS_BOUNDARY) = 1;
                    mBoundaryElements.push_back(*elem);
                    is_repited = true;
                }
            }
            face++;
        }
        face = 0;
        is_repited = false;
    }
    // Mark and collect elements adjacent to boundary nodes that were not
    // flagged in the loop above.
    unsigned int I = 0;
    NodesContainerType& rNodes = mr_model_part.NodesArray();
    for(NodesIteratorType inode = rNodes.begin(); inode!=rNodes.end(); ++inode)
    {
        if((*inode)->GetValue(IS_BOUNDARY) == 1)
        {
            WeakPointerVector<Element>& neighb_elems = (*inode)->GetValue(NEIGHBOUR_ELEMENTS);
            I = 0;
            for( ElementIteratorType neighb_elem = neighb_elems.begin(); neighb_elem!= neighb_elems.end(); ++neighb_elem)
            {
                if(neighb_elem->GetValue(IS_BOUNDARY)!=1)
                {
                    neighb_elem->GetValue(IS_BOUNDARY)=1;
                    mBoundaryElements.push_back(neighb_elems(I).lock());
                }
                I++;
            }
        }
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
void CreateMasterConditions2D(const array_1d<NodePointerType,2>& rPair, const IteratorType& elem, const unsigned int& Id, ConditionsArrayType& MasterConditions)
{
KRATOS_TRY
Line2D2<Node<3> >::Pointer pgeom = Line2D2<Node<3> >::Pointer (new Line2D2<Node<3> >( rPair[0], rPair[1] ) ) ;
Condition::Pointer MasterSegment = Condition::Pointer(new MasterContactFaceType(Id, pgeom ) ) ;
MasterSegment->GetValue(NEIGHBOUR_ELEMENTS).push_back(*(elem));
((rPair)[0])->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSegment);
((rPair)[1])->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSegment);
(*elem)->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSegment);
MasterConditions.push_back(MasterSegment);
ConditionsArrayType& pConditions = mr_model_part.Conditions();
pConditions.push_back(MasterSegment);
KRATOS_CATCH("")
}
void CreateMasterConditions3D(const array_1d<NodePointerType,3>& rPair,
const IteratorType& elem,
const unsigned int& Id,
ConditionsArrayType& MasterConditions)
{
KRATOS_TRY
Triangle2D3<Node<3> >::Pointer pgeom = Triangle2D3<Node<3> >::Pointer( new Triangle2D3<Node<3> > ( rPair[0], rPair[1], rPair[2]) );
Condition::Pointer MasterSurface = Condition::Pointer(new MasterContactFace3D(Id, pgeom) );
MasterSurface->GetValue(NEIGHBOUR_ELEMENTS).push_back(*(elem));
MasterSurface->GetValue(IS_BOUNDARY) = 1;
rPair[0]->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface);
rPair[1]->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface);
rPair[2]->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface);
(*elem)->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface);
MasterConditions.push_back(MasterSurface);
ConditionsArrayType& pConditions = mr_model_part.Conditions();
pConditions.push_back(MasterSurface);
KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// 2D filter over the candidate contact pairs: a pair survives when its two
/// elements share no node, or when they share exactly one node AND one of the
/// non-shared nodes penetrates the other element (SearchInsideNode).
void FiltratePairContacts2D(ContainerContactPair& PairContacts)
{
    KRATOS_TRY
    ContainerContactPair accepted;
    std::vector<unsigned int> common_ids;
    for(IteratorContainerContactPair ipair = PairContacts.begin(); ipair!=PairContacts.end(); ++ipair)
    {
        const bool share_nodes = SearchCommonNode((*ipair)[0], (*ipair)[1], common_ids);
        if(!share_nodes)
        {
            accepted.push_back(*(ipair));
        }
        // a pair sharing a full edge (two nodes) is never a contact pair
        else if(common_ids.size()!=2 && SearchInsideNode((*ipair)[0], (*ipair)[1], common_ids[0]))
        {
            accepted.push_back(*(ipair));
        }
        common_ids.clear();
    }
    PairContacts.swap(accepted);
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// 3D filter over the candidate contact pairs: keeps only the pairs whose two
/// elements share no node.  (A relaxed penetration test similar to the 2D
/// version existed here but was disabled; the dead code has been removed.)
/// BUGFIX: a stray closing brace placed PairContacts.swap(temp) outside the
/// loop/try structure and unbalanced the function body; braces restored.
void FiltratePairContacts3D(ContainerContactPair& PairContacts)
{
    KRATOS_TRY
    ContainerContactPair temp;
    std::vector<unsigned int> id;
    for(IteratorContainerContactPair ipair = PairContacts.begin(); ipair!=PairContacts.end(); ipair++)
    {
        if(SearchCommonNode( (*ipair)[0], (*ipair)[1], id )==false)
        {
            temp.push_back(*(ipair));
        }
        id.clear();
    }
    PairContacts.swap(temp);
    KRATOS_CATCH("")
}
/// Decides whether the pair (elem1, elem2) is a valid contact candidate.
/// 2D: pairs sharing no node are valid; pairs sharing a full edge (two nodes)
/// are not; pairs sharing one node are valid only when a non-shared node
/// penetrates the other element.  3D: valid iff no node is shared.
bool FiltratePairContacts(const PointerType& elem1, const PointerType& elem2)
{
    KRATOS_TRY
    std::vector<unsigned int> id;
    const bool share = SearchCommonNode(elem1, elem2, id);
    if(mrdimension==2)
    {
        if(!share)
            return true;
        if(id.size()==2)       // shared edge: never a contact pair
            return false;
        if(SearchInsideNode(elem1, elem2, id[0])==true)
            return true;
        return false;
    }
    // 3D case
    return !share;
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Finds the node(s) shared by the two contact elements.
/// Collects into id the ids of the nodes shared by the geometries of elem1
/// and elem2.  Returns true when at least one node is shared.
bool SearchCommonNode(const PointerType& elem1, const PointerType& elem2, std::vector<unsigned int>& id)
{
    KRATOS_TRY
    Element::GeometryType& geom1 = (elem1)->GetGeometry();
    Element::GeometryType& geom2 = (elem2)->GetGeometry();
    for(unsigned int i = 0; i<geom1.size(); i++)
    {
        // BUGFIX: the inner bound was geom1.size() while indexing geom2 —
        // out of bounds whenever the two geometries have different node counts.
        for(unsigned int j = 0; j<geom2.size(); j++)
        {
            if(geom1[i].Id()==geom2[j].Id())
            {
                id.push_back(geom1[i].Id());
            }
        }
    }
    return id.size()!=0;
    KRATOS_CATCH("")
}
/// Checks whether a non-shared node of one element falls inside the other element.
//*****************************************************************************************************
//*****************************************************************************************************
/// Checks whether any node other than the shared node (id ide) of one element
/// lies geometrically inside the other element.  Returns true on the first
/// penetrating node found, in either direction.
bool SearchInsideNode(const PointerType& elem1, const PointerType& elem2, const unsigned int& ide)
{
    KRATOS_TRY
    Element::GeometryType& geom1 = (elem1)->GetGeometry();
    Element::GeometryType& geom2 = (elem2)->GetGeometry();
    array_1d<double, 3> result;  // local coordinates filled by IsInside (not used here)
    // Nodes of elem2 inside elem1.
    // BUGFIX: both loops previously used the size of the OTHER geometry as
    // their bound — correct only when the geometries have equal node counts,
    // out of bounds otherwise.
    for(unsigned int i = 0; i<geom2.size(); i++)
    {
        if(geom2[i].Id()!=ide)
        {
            if(geom1.IsInside(geom2[i], result))
            {
                return true;
            }
        }
    }
    // Nodes of elem1 inside elem2.
    for(unsigned int i = 0; i<geom1.size(); i++)
    {
        if(geom1[i].Id()!=ide)
        {
            if(geom2.IsInside(geom1[i], result))
            {
                return true;
            }
        }
    }
    return false;
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
void NodeInside(const PointerType& MasterObject, const PointerType& SlaveObject, std::vector<NodePointerType>& InsideNodes)
{
KRATOS_TRY
Element::GeometryType& geom_master = MasterObject->GetGeometry();
Element::GeometryType& geom_slave = SlaveObject->GetGeometry();
std::vector<unsigned> Nodes;
/// buscando el nodo comun
bool commun = false;
for(unsigned int i = 0; i<geom_slave.size(); i++)
{
commun = false;
for(unsigned int j = 0; j<geom_master.size(); j++)
{
if(geom_slave[i].Id()==geom_master[j].Id())
{
commun = true;
}
}
if(commun==false)
Nodes.push_back(i);
}
array_1d<double, 3> result;
for (unsigned int i = 0; i<Nodes.size(); i++ )
{
if(geom_master.IsInside(geom_slave[Nodes[i]], result))
{
InsideNodes.push_back(geom_slave(Nodes[i]));
}
}
KRATOS_CATCH("")
}
/// Enables or disables recomputation of the boundary contour on the next run.
void ResetFlagComputeBoundaryContour(const bool& rflag)
{
mcompute_boundary_contour = rflag;
}
private:
ModelPart mr_model_part;                    // NOTE(review): stored by value despite the "mr_" (reference) prefix — confirm a copy is intended
unsigned int mrdimension;                   // working dimension: 2 or 3
double mpenalty_factor;                     // penalty factor (presumably for the contact formulation — not used in this chunk)
bool mcompute_boundary_contour;             // when true, the boundary contour is (re)computed
NodesContainerType mBoundaryNodes;          // nodes flagged IS_BOUNDARY
ContainerType mBoundaryElements;            // elements on, or adjacent to, the boundary
ContainerContactPair mPairContacts;         // candidate contact pairs
ConditionsArrayType mMasterConditionsArray; // master contact conditions built from the boundary
//*****************************************************************************************************
//*****************************************************************************************************
/// WARNING: intended to be parallelised.
/// For every boundary slave node, selects the master segment stored in
/// CONTACT_LINK_MASTER among the two conditions incident to its nearest
/// master node (NEAR_NODE).  The decision blends the normals of the right and
/// left master segments, weighted by clamped projections of the slave offset,
/// and compares them against the averaged slave-side normal.
/// WARNING: meant to be parallelised — OpenMP scaffolding is present but the
/// thread count is currently forced to 1.
void IdentifyMasterSegment2D()
{
    KRATOS_TRY
    std::cout<< " IDENTIFYING THE MASTER 2D SEGMENT " <<std::endl;
    NodesArrayType& pNodes = mr_model_part.Nodes();
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
#ifdef _OPENMP
    int number_of_threads = 1; //omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);
    double pr = 0.00;   // clamped projection weight, right side
    double pl = 0.00;   // clamped projection weight, left side
    double wr = 0.00;   // normalised right weight
    double wl = 0.00;   // normalised left weight
    array_1d<double, 3> CS = ZeroVector(3);        // closest -> slave (computed but unused below)
    array_1d<double, 3> CL = ZeroVector(3);        // left point -> closest (normalised later)
    array_1d<double, 3> CR = ZeroVector(3);        // closest -> right point (normalised later)
    array_1d<double, 3> Normal = ZeroVector(3);    // blended master normal
    array_1d<double, 3> Normal_s = ZeroVector(3);  // averaged slave-side normal
    array_1d<double, 3> Normal_r = ZeroVector(3);
    array_1d<double, 3> Normal_l = ZeroVector(3);
    array_1d<double, 3> GR = ZeroVector(3);        // slave - right point
    array_1d<double, 3> GL = ZeroVector(3);        // slave - closest point
    double distance_r = 0.00;
    double distance_l = 0.00;
    double N = 0.00;
    array_1d<double, 3> e3;  // out-of-plane axis (NOTE(review): currently unused)
    e3[0] = 0;
    e3[1] = 0;
    e3[2] = 1.00;
    Segment2D rSegment_r;
    Segment2D rSegment_l;
    //#pragma omp parallel for private(I, g, g_old, gl, gr, compare_distance, CS, CL, CR, Normal, rSegment)
    for(int k=0; k<number_of_threads; k++)
    {
        NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
        NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
        for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
        {
            if(i->GetValue(IS_BOUNDARY)==1)
            {
                WeakPointerVector<Condition>& neighb_cond = ((i)->GetValue(NEAR_NODE))->GetValue(NEIGHBOUR_CONDITIONS);
                WeakPointerVector<Condition>& neighb_cond_slave = (i)->GetValue(NEIGHBOUR_CONDITIONS);
                if( neighb_cond.size()!=0 && neighb_cond_slave.size()!=0 )
                {
                    array_1d<int,2> RL;
                    const unsigned int& ID = i->GetValue(NEAR_NODE)->Id();
                    const unsigned int& ID_slave = i->Id();
                    // Order the two master conditions as right/left relative to
                    // the near node.
                    RL[0] = 0;
                    RL[1] = 0;
                    if(neighb_cond(0).lock()->GetGeometry()(0)->Id()==ID)
                        RL[0] = 1;
                    else
                        RL[1] = 1;
                    const Condition::Pointer& first = neighb_cond(RL[0]).lock();
                    const Condition::Pointer& last = neighb_cond(RL[1]).lock();
                    // Same right/left ordering for the slave node's own conditions.
                    RL[0] = 0;
                    RL[1] = 0;
                    if(neighb_cond_slave(0).lock()->GetGeometry()(0)->Id()==ID_slave)
                        RL[0] = 1;
                    else
                        RL[1] = 1;
                    const Condition::Pointer& first_s = neighb_cond_slave(RL[0]).lock();
                    const Condition::Pointer& last_s = neighb_cond_slave(RL[1]).lock();
                    Condition::GeometryType& geom_first = first->GetGeometry();
                    Condition::GeometryType& geom_last = last->GetGeometry();
                    array_1d<double, 3>& point_closest = i->GetValue(NEAR_NODE)->Coordinates();
                    array_1d<double, 3>& point_slave = i->Coordinates();
                    array_1d<double, 3>& point_left = geom_last.GetPoint(1);
                    array_1d<double, 3>& point_right = geom_first.GetPoint(0);
                    noalias(CS) = point_closest - point_slave;
                    noalias(CL) = point_left - point_closest;
                    noalias(CR) = point_closest - point_right;
                    noalias(GR) = point_slave - point_right;
                    noalias(GL) = point_slave - point_closest;
                    // Averaged slave normal from its two adjacent conditions.
                    first_s->Calculate(NORMAL, Normal_r, CurrentProcessInfo);
                    last_s->Calculate(NORMAL, Normal_l, CurrentProcessInfo);
                    noalias(Normal_s) = Normal_r + Normal_l;
                    Normal_s = (1.00/norm_2(Normal_s)) * Normal_s;
                    // Master-side normals (Normal_r / Normal_l reused here).
                    first->Calculate(NORMAL, Normal_r, CurrentProcessInfo);
                    last->Calculate(NORMAL, Normal_l, CurrentProcessInfo);
                    const double& cl = norm_2(CL);
                    const double& cr = norm_2(CR);
                    noalias(CL) = CL * (1.00/cl);
                    noalias(CR) = CR * (1.00/cr);
                    // Clamped projections acting as right/left weights.
                    pr = -inner_prod(GL,CR);
                    if(pr<=0.00) pr = 0.00;
                    pl = inner_prod(GL,CL);
                    if(pl<=0.00) pl = 0.00;
                    if(std::fabs(pr-pl)<=1E-14)
                    {
                        pr = 1.00;
                        pl =1.00;
                    }
                    wr = pr/(pr + pl);
                    wl = 1.00 - wr;
                    N = norm_2( pr * Normal_r + pl * Normal_l );
                    noalias(Normal) = (1.00/N) * ( pr * Normal_r + pl * Normal_l);
                    rSegment_r.AssignPointsAndComputeParameters(point_right, point_closest);
                    rSegment_l.AssignPointsAndComputeParameters(point_closest, point_left);
                    distance_r = rSegment_r.DistPoint2Segment2D(point_slave);
                    distance_l = rSegment_l.DistPoint2Segment2D(point_slave);
                    // Deciding the best segment.
                    // NOTE(review): the two outer branches below are identical,
                    // so the distance_r/distance_l comparison currently has no
                    // effect on the outcome — confirm whether this is intended.
                    // NOTE(review): wr==wl is an exact floating-point equality.
                    const double& dot_l = inner_prod(Normal_s,Normal_l);
                    const double& dot_r = inner_prod(Normal_s,Normal_r);
                    if(distance_r <= distance_l)
                    {
                        if(wr==wl) // either segment is valid
                            i->GetValue(CONTACT_LINK_MASTER) = first;
                        else if(wr > wl) // right segment preferred
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                        else if(wr < wl) // left segment preferred
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                    }
                    if(distance_r > distance_l)
                    {
                        if(wr==wl) // either segment is valid
                            i->GetValue(CONTACT_LINK_MASTER) = first;
                        else if(wr > wl) // right segment preferred
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                        else if(wr < wl) // left segment preferred
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                    }
                }
            }
        }
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Local search: for every boundary node of the elements in [it_begin,
/// it_end), scans the neighbour conditions of the objects returned by the
/// spatial bins and links the node (CONTACT_LINK_MASTER) to the condition
/// whose segment is closest, skipping segments that contain the node itself.
/// NODAL_VALUES is used as a temporary "visited" flag on conditions and is
/// reset to 0 before moving on to the next boundary node.
template<class TConfigure>
void LocalSearch2D( BinsObjectDynamic<TConfigure>& rBins,
                    const IteratorType& it_begin,
                    const IteratorType& it_end)
{
    std::cout<< " LOCAL SEARCH ALGORITHM " <<std::endl;
    unsigned int I = 0;           // position of the current condition in its neighbour list
    double compare_distance = 0.00;
    ResultContainerType Result;   // objects returned by the bins query
    Segment2D rSegment;
    // (removed unused locals Normal, Mid_Point, Vect)
    for(IteratorType it = it_begin; it!=it_end; it++)
    {
        std::size_t size = rBins.SearchObjects(*it, Result);
        if(size!=0)
        {
            Element::GeometryType& geom = (*it)->GetGeometry();
            for(unsigned int i = 0; i<geom.size(); i++)
            {
                if(geom(i)->GetValue(IS_BOUNDARY) == 1)
                {
                    array_1d<double, 3>& Points0 = geom.GetPoint(i);
                    double& distance = geom(i)->GetValue(DISTANCE);
                    for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                    {
                        I = 0;
                        WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                        if(neighb_cond.size()!=0)
                        {
                            for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                            {
                                // NODAL_VALUES==0 means "not yet tested for this node"
                                if(neighb->GetValue(NODAL_VALUES) == 0)
                                {
                                    neighb->GetValue(NODAL_VALUES) = 1;
                                    Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
                                    // skip segments that contain the node itself
                                    if( (geom_2(0)->Id() != geom(i)->Id()) && (geom_2(1)->Id() != geom(i)->Id()) )
                                    {
                                        array_1d<double, 3>& Points1 = geom_2.GetPoint(0);
                                        array_1d<double, 3>& Points2 = geom_2.GetPoint(1);
                                        rSegment.AssignPointsAndComputeParameters(Points1, Points2);
                                        compare_distance = rSegment.DistPoint2Segment2D(Points0);
                                        if(compare_distance<distance)
                                        {
                                            distance = compare_distance;
                                            geom(i)->GetValue(CONTACT_LINK_MASTER) = neighb_cond(I).lock();
                                        }
                                    }
                                }
                                // BUGFIX: I must track the iterator position
                                // unconditionally; it was previously incremented
                                // only for unvisited conditions, so a condition
                                // already flagged through another result object
                                // desynchronised I and neighb_cond(I) could link
                                // the wrong master segment.
                                I++;
                            }
                        }
                    }
                    // reset the visited flags for the next boundary node
                    for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                    {
                        WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                        if(neighb_cond.size()!=0)
                            for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                                neighb->GetValue(NODAL_VALUES) = 0;
                    }
                }
            }
        }
        Result.clear();
    }
}
//*****************************************************************************************************
//*****************************************************************************************************
/// For every boundary node, find the spatially nearest node among all boundary
/// nodes (via a dynamic 2D bins structure) and store it in NEAR_NODE.
/// The node range is split manually and processed with OpenMP threads.
/// NOTE(review): the bins are built from the same container being queried, so
/// SearchNearestPoint may return the query node itself — confirm intended.
/// NOTE(review): the KRATOS_WATCH calls print from inside the parallel loop
/// (interleaved output); consider removing them for production runs.
void SearchNearNode2D()
{
    KRATOS_TRY
    std::cout<< " SEARCHING NEAR NODE " <<std::endl;
    // Determine the thread count and split the boundary nodes into chunks.
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    int distance = std::distance(mBoundaryNodes.begin(), mBoundaryNodes.end());
    CreatePartition(number_of_threads, distance, node_partition);
    // Spatial search structure over all boundary nodes.
    BinsDynamic<2, NodeType, NodesContainerType> BinsPoint(mBoundaryNodes.begin(), mBoundaryNodes.end());
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        NodesIteratorType it_begin = mBoundaryNodes.begin() + node_partition[k];
        NodesIteratorType it_end   = mBoundaryNodes.begin() + node_partition[k+1];
        for(NodesIteratorType inode = it_begin; inode!=it_end; inode++)
        {
            (*inode)->GetValue(NEAR_NODE) = BinsPoint.SearchNearestPoint(**inode);
            KRATOS_WATCH((*inode)->Id())
            KRATOS_WATCH((*inode)->GetValue(NEAR_NODE)->Id())
            KRATOS_WATCH("----------------------------")
        }
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// For every boundary node of each element in [it_begin, it_end), scan the
/// nodes of the neighbour conditions returned by the bins search and store the
/// closest one in NEAR_NODE, tracking the running minimum in DISTANCE.
/// Finally every node's DISTANCE is reset to DBL_MAX (in parallel) so the next
/// search pass starts fresh.
template<class TConfigure>
void SearchNearNode2D( BinsObjectDynamic<TConfigure>& rBins,
                       const IteratorType& it_begin,
                       const IteratorType& it_end)
{
    KRATOS_TRY
    std::cout<< " SEARCHING NEAR NODE 2D " <<std::endl;
    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    // unsigned int I = 0;
    double compare_distance = 0.00;
    ResultContainerType Result;
    array_1d<double, 3> Vect = ZeroVector(3);
    for(IteratorType it = it_begin; it!=it_end; it++)
    {
        Result.clear();
        rBins.SearchObjects(*it, Result); ///SearchAroundObjectsInner(*it, Result); // enable the commented call for the 2D case
        if(Result.size()!=0)
        {
            Element::GeometryType& geom = (*it)->GetGeometry();
            for(unsigned int i = 0; i<geom.size(); i++)
            {
                // Only boundary nodes participate in the search.
                if(geom(i)->GetValue(IS_BOUNDARY) == 1)
                {
                    array_1d<double, 3>& Points0 = geom.GetPoint(i);
                    double& distance = geom(i)->GetValue(DISTANCE);
                    for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                    {
                        // I = 0;
                        WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                        if(neighb_cond.size()!=0)
                        {
                            for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                            {
                                // NODAL_VALUES acts as a visited flag so each condition
                                // is tested at most once per node.
                                if(neighb->GetValue(NODAL_VALUES) == 0)
                                {
                                    neighb->GetValue(NODAL_VALUES) = 1;
                                    Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
                                    // Skip conditions containing the node itself.
                                    if( (geom_2(0)->Id() != geom(i)->Id()) && (geom_2(1)->Id() != geom(i)->Id()) )
                                    {
                                        /// searching for the closest node of the condition
                                        for(unsigned int k = 0; k<geom_2.size(); k++)
                                        {
                                            array_1d<double, 3>& Points1 = geom_2.GetPoint(k);
                                            noalias(Vect) = Points1 - Points0;
                                            compare_distance = norm_2(Vect);
                                            if(compare_distance<distance)
                                            {
                                                distance = compare_distance;
                                                geom(i)->GetValue(NEAR_NODE) = geom_2(k);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    /// Resetting the visited flags for the next boundary node.
                    for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                    {
                        WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                        if(neighb_cond.size()!=0)
                            for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                                neighb->GetValue(NODAL_VALUES) = 0;
                    }
                }
            }
        }
    }
    /// Resetting the values of distance for all nodes (parallel over chunks).
    NodesArrayType& pNodes = mr_model_part.Nodes();
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);
    #pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
        NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
        for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
        {
            i->GetValue(DISTANCE) = DBL_MAX;
        }
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************
/// Parallel local search (3D): for every unprocessed boundary node of the
/// elements in [it_begin, it_end), find the closest master triangle among the
/// neighbour conditions of the bins search results and store it in
/// CONTACT_LINK_MASTER. NODAL_VALUES marks nodes already processed.
/// NOTE(review): the NODAL_VALUES flag is tested outside the node lock, so two
/// threads may occasionally process the same node; updates are still taken
/// under the node lock — confirm this duplicate work is acceptable.
template<class TConfigure>
void LocalSearch3D( BinsObjectDynamic<TConfigure>& rBins,
                    const IteratorType& it_begin,
                    const IteratorType& it_end)
{
    KRATOS_TRY
#ifdef _OPENMP
    double start_prod = omp_get_wtime();
#endif
    unsigned int I = 0;                 // position of the current neighbour condition
    double distance = 0.00;             // best (smallest) distance found so far
    double compare_distance = 0.00;
    ResultContainerType Result;
    array_1d<double, 3> Points0;        // slave node coordinates
    array_1d<double, 3> Points1;        // master triangle vertices
    array_1d<double, 3> Points2;
    array_1d<double, 3> Points3;
    Plane rPlane;                       // point-triangle distance helper
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> partition;
    int distance_2 = int(it_end-it_begin);
    CreatePartition(number_of_threads, distance_2, partition);
    // FIX: rPlane was previously shared among all threads while each thread
    // calls DistPoint3Triangle3 on it; it is now privatized like the other
    // per-thread scratch variables to rule out a data race.
    #pragma omp parallel for private(I, distance, compare_distance, Result, Points0, Points1, Points2, Points3, rPlane)
    for(int k=0; k<number_of_threads; k++)
    {
        IteratorType it_begin_1 = it_begin + partition[k];
        IteratorType it_end_1   = it_begin + partition[k+1];
        for(IteratorType it =it_begin_1; it!=it_end_1; it++)
        {
            rBins.SearchObjects(*it, Result);
            if(Result.size()!=0)
            {
                Element::GeometryType& geom = (*it)->GetGeometry();
                for(unsigned int i = 0; i<geom.size(); i++)
                {
                    // Process each boundary node only once across all elements.
                    if(geom[i].GetValue(NODAL_VALUES)==0)
                    {
                        if(geom[i].GetValue(IS_BOUNDARY) == 1)
                        {
                            geom[i].SetLock();
                            geom[i].GetValue(NODAL_VALUES) = 1;
                            geom[i].UnSetLock();
                            Points0[0] = geom[i].X();
                            Points0[1] = geom[i].Y();
                            Points0[2] = geom[i].Z();
                            distance = DBL_MAX;
                            for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                            {
                                I = 0;
                                WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                                if(neighb_cond.size()!=0)
                                {
                                    for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                                    {
                                        Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
                                        // Skip conditions containing the node itself.
                                        if((geom_2[0].Id() != geom[i].Id()) && (geom_2[1].Id() != geom[i].Id()) && (geom_2[2].Id()!= geom[i].Id()))
                                        {
                                            Points1[0] = geom_2[0].X();
                                            Points1[1] = geom_2[0].Y();
                                            Points1[2] = geom_2[0].Z();
                                            Points2[0] = geom_2[1].X();
                                            Points2[1] = geom_2[1].Y();
                                            Points2[2] = geom_2[1].Z();
                                            Points3[0] = geom_2[2].X();
                                            Points3[1] = geom_2[2].Y();
                                            Points3[2] = geom_2[2].Z();
                                            compare_distance = rPlane.DistPoint3Triangle3(Points0, Points1, Points2, Points3);
                                            if(compare_distance<distance)
                                            {
                                                distance = compare_distance;
                                                // Link the closest condition under the node lock.
                                                geom[i].SetLock();
                                                geom[i].GetValue(CONTACT_LINK_MASTER) = neighb_cond(I).lock();
                                                geom[i].UnSetLock();
                                            }
                                        }
                                        I++;
                                    }
                                }
                            }
                        }
                    }
                }
            }
            Result.clear();
        }
    }
#ifdef _OPENMP
    double stop_prod = omp_get_wtime();
    std::cout << " Time Searching Masters Surfaces = " << stop_prod - start_prod << " seconds " << std::endl;
#endif
    KRATOS_CATCH("")
}
/// Split [0, number_of_rows) into number_of_threads contiguous chunks of equal
/// size; the last chunk absorbs the division remainder. partitions[k] and
/// partitions[k+1] bound the k-th thread's range.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk = number_of_rows / number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + chunk;
    partitions[number_of_threads] = number_of_rows;
}
/// Cross product: writes (x1, y1, z1) = (x2, y2, z2) x (x3, y3, z3).
/// The first three arguments are outputs.
/// NOTE(review): outputs are assigned sequentially while inputs are still
/// read, so output references must not alias the inputs.
inline void V3DCro( double& x1,
                    double& y1,
                    double& z1,
                    const double& x2,
                    const double& y2,
                    const double& z2,
                    const double& x3,
                    const double& y3,
                    const double& z3)
{
    x1 =(y2)*(z3)-(z2)*(y3);
    y1 =(z2)*(x3)-(x2)*(z3);
    z1 =(x2)*(y3)-(y2)*(x3);
}
/// Normalize the vector (x1, y1, z1) in place and return its original
/// Euclidean length in `s`. If the length is not greater than EPSILON the
/// vector is left unchanged (avoids division by ~zero).
inline void V3DNor( double& s,
                    double& x1,
                    double& y1,
                    double& z1)
{
    s= std::sqrt((x1)*(x1)+(y1)*(y1)+(z1)*(z1));
    if((s)>EPSILON)(x1)=(x1)/(s);
    if((s)>EPSILON)(y1)=(y1)/(s);
    if((s)>EPSILON)(z1)=(z1)/(s);
}
/// Dot product: writes s = (x1, y1, z1) . (x2, y2, z2).
inline void V3DDot(double& s,
                   const double& x1,
                   const double& y1,
                   const double& z1,
                   const double& x2,
                   const double& y2,
                   const double& z2)
{
    // Accumulate term by term (same left-to-right order as a single
    // expression), then publish the result.
    double acc = x1 * x2;
    acc += y1 * y2;
    acc += z1 * z2;
    s = acc;
}
};
} // namespace Kratos.
#endif // KRATOS_GEOMETRY_UTILITIES_INCLUDED defined
|
openmp-simd-2.c | /* { dg-do compile } */
/* { dg-options "-fopenmp-simd -fdump-tree-original" } */
extern void abort ();
int a[1024] __attribute__((aligned (32))) = { 1 };
struct S { int s; };
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)
/* Sums a[] through three reductions in one simd region — the declared '+' UDR
   on struct S, the named 'foo' UDR on struct S, and the named 'foo' UDR on
   int — and aborts if the three results disagree. */
__attribute__((noinline, noclone)) int
foo (void)
{
  int i, u = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
#pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u)
  for (i = 0; i < 1024; i++)
    {
      int x = a[i];
      s.s += x;
      t.s += x;
      u += x;
    }
  if (t.s != s.s || u != s.s)
    abort ();
  return s.s;
}
/* Copies b into a under a combined 'parallel for simd'; with -fopenmp-simd
   only the simd part (safelen(64)) should survive into the tree dump, which
   the scan directives at the end of the file check. */
void bar(int n, float *a, float *b)
{
  int i;
#pragma omp parallel for simd num_threads(4) safelen(64)
  for (i = 0; i < n ; i++)
    a[i] = b[i];
}
/* { dg-final { scan-tree-dump-times "pragma omp simd reduction\\(u\\) reduction\\(t\\) reduction\\(\\+:s\\) aligned\\(a:32\\)" 1 "original" } } */
/* { dg-final { scan-tree-dump-times "pragma omp simd safelen\\(64\\)" 1 "original" } } */
/* { dg-final { scan-tree-dump-not "omp parallel" "original" } } */
/* { dg-final { scan-tree-dump-not "omp for" "original" } } */
|
target_teams_distribute_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute'}}
#pragma omp target teams distribute foo
// Verifies the directive accepts a plain for loop and rejects any other
// statement as its associated statement.
void test_no_clause() {
  int i;
#pragma omp target teams distribute
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp target teams distribute' must be a for loop}}
#pragma omp target teams distribute
  ++i;
}
// Verifies that branching into/out of the OpenMP region is diagnosed while
// jumps staying inside the region are allowed.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target teams distribute
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown tokens after the directive name are ignored with a warning.
void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Trailing punctuation after the directive (or after a valid clause) is
// ignored with a warning rather than an error.
void test_non_identifiers() {
  int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
#pragma omp target teams distribute, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exercises malformed and out-of-range 'collapse' clause arguments, the
// required loop-nest depth, and the interaction with firstprivate on the
// collapsed iteration variable.
void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
#pragma omp target teams distribute collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{loop iteration variable in the associated loop of 'omp target teams distribute' directive may not be firstprivate, predetermined as private}}
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target teams distribute collapse(2) firstprivate(i)
  for (i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// Exercises malformed 'private' clause argument lists and valid one-, two-,
// and three-variable forms.
void test_private() {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target teams distribute private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Exercises malformed 'lastprivate' clause argument lists and valid one-,
// two-, and three-variable forms.
void test_lastprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target teams distribute lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises malformed 'firstprivate' clause argument lists and the
// lastprivate/firstprivate conflict diagnostics.
void test_firstprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// The loop iteration variable must be of integer or pointer type; float and
// double induction variables are rejected.
void test_loop_messages() {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
|
1502.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with deterministic values derived from the indices, so every run of
   the benchmark operates on identical input data. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr, one value per entry with a line break every 20 values;
   keeps the kernel's output live so the compiler cannot eliminate it. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 stencil convolution of A into B over the
   interior points (borders untouched). The whole function is timed, including
   the call and return. Rows are independent, so the outer loop is
   parallelized; j must be privatized because it is declared outside the
   loop. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* FIX: the original carried an unexpanded autotuning placeholder pragma
     (collapse(#P12), schedule(#P9, #P11), num_threads(#P11)) immediately
     followed by a second `parallel for ... simd` with misplaced clauses;
     neither is valid OpenMP/C. Replaced by one well-formed directive that
     keeps the concrete schedule(static, 28) from the second pragma. */
#pragma omp parallel for private(j) schedule(static, 28)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}
/* Driver: allocate and initialize the arrays, time the 2D convolution kernel,
   print the output to defeat dead-code elimination, then free the arrays. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
eval_cf.c | /*******************************************************************************
* 2pt_box/eval_cf.c: this file is part of the FCFC program.
* FCFC: Fast Correlation Function Calculator.
* Github repository:
https://github.com/cheng-zhao/FCFC
* Copyright (c) 2020 -- 2021 Cheng Zhao <zhaocheng03@gmail.com> [MIT license]
*******************************************************************************/
#include "eval_cf.h"
#include "count_func.h"
#include "read_file.h"
#include "read_res.h"
#include "save_res.h"
#include "legpoly.h"
#include "build_tree.h"
#include <stdlib.h>
#ifdef OMP
#include <omp.h>
#endif
/*============================================================================*\
Macros for error handling
\*============================================================================*/
#define CLEAN_TREE \
for (int ii = 0; ii < cf->ncat; ii++) \
tree_destroy(tree[ii], FCFC_TREE_TYPE_KDTREE); \
free(tree);
/*============================================================================*\
Functions for steps of correlation function evaluations
\*============================================================================*/
/******************************************************************************
Function `eval_pairs`:
Evaluate pair counts for the input catalogs.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
static int eval_pairs(const CONF *conf, CF *cf) {
  /* Allocate memory for trees: one slot per catalogue, built lazily below. */
  void **tree;
  if (!(tree = malloc(sizeof(void *) * cf->ncat))) {
    P_ERR("failed to allocate memory for trees\n");
    return FCFC_ERR_MEMORY;
  }
  for (int i = 0; i < cf->ncat; i++) tree[i] = NULL;
  /* Number of bins along the second dimension of the count tables. */
  int ny;
  if (cf->bintype == FCFC_BIN_SPI) ny = cf->np;
  else ny = cf->nmu; /* cf->bintype == FCFC_BIN_ISO or FCFC_BIN_SMU */
  for (int i = 0; i < cf->npc; i++) {
    /* Read pair counts from file if applicable. */
    if (!cf->comp_pc[i]) {
      printf("Reading %s pairs ...", conf->pc[i]);
      if (conf->verbose) printf("\n  Filename: %s\n", conf->pcout[i]);
      fflush(stdout);
      int e = read_pair_count(conf->pcout[i], cf->ns, ny, cf->ncnt[i]);
      if (e) {
        CLEAN_TREE;
        return e;
      }
      printf(FMT_DONE);
      continue;
    }
    /* Indices of the two catalogues involved in this pair count. */
    int cat[2];
    cat[0] = cf->pc_idx[0][i];
    cat[1] = cf->pc_idx[1][i];
    /* Read catalogue and construct the tree if necessary. */
    for (int j = 0; j < 2; j++) {
      int idx = cat[j];
      if (!tree[idx]) {
        tree[idx] = tree_create(conf, cf, idx, FCFC_TREE_TYPE_KDTREE);
        if (!tree[idx]) {
          CLEAN_TREE;
          return FCFC_ERR_TREE;
        }
      }
    }
    /* Count pairs. */
    printf("Counting %c%c pairs ...", cf->label[cat[0]], cf->label[cat[1]]);
    if (conf->verbose) printf("\n");
    fflush(stdout);
    if (cat[0] == cat[1]) { /* auto pairs */
      count_pairs(tree[cat[0]], tree[cat[0]], cf, cf->cnt[i], true);
      /* Double auto pairs. */
      for (size_t k = 0; k < cf->ntot; k++) cf->cnt[i][k] *= 2;
      cf->norm[i] = (double) cf->ndata[cat[0]] * (cf->ndata[cat[0]] - 1);
    }
    else { /* cross counts */
      count_pairs(tree[cat[0]], tree[cat[1]], cf, cf->cnt[i], false);
      cf->norm[i] = (double) cf->ndata[cat[0]] * cf->ndata[cat[1]];
    }
    /* Normalise pair counts. */
    for (size_t k = 0; k < cf->ntot; k++)
      cf->ncnt[i][k] = cf->cnt[i][k] / cf->norm[i];
    /* Save pair counts. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_PAIR_COUNT);
    if (e) {
      CLEAN_TREE;
      return e;
    }
    /* Release memory if necessary: free a catalogue's data and tree once no
       later pair count references it. */
    for (int j = 0; j < 2; j++) {
      bool free_data = true;
      for (int k = i + 1; k < cf->npc; k++) {
        if (cat[j] == cf->pc_idx[0][k] || cat[j] == cf->pc_idx[1][k]) {
          free_data = false;
          break;
        }
      }
      if (free_data) {
        free(cf->data[cat[j]]);
        cf->data[cat[j]] = NULL;
        tree_destroy(tree[cat[j]], FCFC_TREE_TYPE_KDTREE);
        tree[cat[j]] = NULL;
      }
    }
    printf(FMT_DONE);
  }
  CLEAN_TREE;
  /* Compute analytical RR if necessary (derived from the box volume
     cf->bsize^3 and the bin edges). */
  if (cf->rr) {
    if (cf->bintype == FCFC_BIN_SPI) {
      double fac = 2 * M_PI / ((double) cf->bsize * cf->bsize * cf->bsize);
      for (int i = 0; i < cf->np; i++) {
        double dpi = cf->pbin[i + 1] - cf->pbin[i];
        for (int j = 0; j < cf->ns; j++) {
          cf->rr[j + i * cf->ns] =
              fac * dpi * (cf->s2bin[j + 1] - cf->s2bin[j]);
        }
      }
    }
    else { /* cf->bintype == FCFC_BIN_ISO or FCFC_BIN_SMU */
      double fac = 4 * M_PI /
          (3 * cf->nmu * (double) cf->bsize * cf->bsize * cf->bsize);
      for (int i = 0; i < cf->ns; i++) {
        double rr = fac *
            (cf->s2bin[i + 1] * cf->sbin[i + 1] - cf->s2bin[i] * cf->sbin[i]);
        for (int j = 0; j < cf->nmu; j++) cf->rr[i + j * cf->ns] = rr;
      }
    }
  }
  return 0;
}
/******************************************************************************
Function `eval_cf_exp`:
Evaluate correlation functions given the expressions for estimators.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_exp(const CONF *conf, CF *cf) {
  printf("Evaluate correlation function estimators ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);
  /* Prepare the array of normalised pair counts, for libast evaluations:
     one (npc + 1)-sized slot per thread; index npc holds the analytical RR. */
#ifdef OMP
  double *pc = calloc((size_t) cf->nthread * (cf->npc + 1), sizeof(double));
#else
  double *pc = calloc(cf->npc + 1, sizeof(double));
#endif
  if (!pc) {
    P_ERR("failed to allocate memory for 2PCF evaluation\n");
    return FCFC_ERR_MEMORY;
  }
  for (int i = 0; i < cf->ncf; i++) {
#ifdef OMP
    /* Each thread offsets its private copy of the base pointer to its own
       slot, so the writes below touch disjoint memory. */
#pragma omp parallel num_threads(cf->nthread) firstprivate(pc)
    {
      const int tid = omp_get_thread_num();
      pc += (size_t) tid * (cf->npc + 1);
#pragma omp for
#endif
    for (size_t j = 0; j < cf->ntot; j++) {
      /* Set pair counts to be passed to libast. */
      for (int k = 0; k < cf->npc; k++) pc[k] = cf->ncnt[k][j];
      if (cf->rr) pc[cf->npc] = cf->rr[j];
      /* Evaluate the 2PCF. */
      if (ast_eval_num(cf->ast_cf[i], cf->cf[i] + j, pc, cf->npc + 1)) {
        ast_perror(cf->ast_cf[i], stderr,
            FMT_ERR " failed to evaluate 2PCF:");
        /* NOTE(review): exit() from inside a parallel region terminates the
           whole process; cleanup of `pc` is skipped on this fatal path. */
#ifdef OMP
        exit(FCFC_ERR_AST);
#else
        free(pc); return FCFC_ERR_AST;
#endif
      }
    }
#ifdef OMP
    }
#endif
    /* Save the correlation function. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_RAW);
    if (e) {
      free(pc); return e;
    }
  }
  free(pc);
  printf(FMT_DONE);
  return 0;
}
/******************************************************************************
Function `eval_cf_mp`:
Evaluate correlation function multipoles given xi(s,mu).
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
/* Projects xi(s, mu) onto Legendre multipoles by direct summation over mu
   bins: xi_l(s) += xi(s, mu_j) * (2l + 1) / nmu * L_l(mu_j), with mu_j the
   midpoint of the j-th mu bin. */
static int eval_cf_mp(const CONF *conf, CF *cf) {
  printf("Compute correlation function multipoles ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);
  for (int i = 0; i < cf->ncf; i++) {
    for (int l = 0; l < cf->nl; l++) {
      int ell = cf->poles[l];                       /* multipole order */
      double fac = (2 * ell + 1) / (double) cf->nmu;
      for (int j = 0; j < cf->nmu; j++) {
        double mu = (j + 0.5) / (double) cf->nmu;   /* mu bin midpoint */
        for (int k = 0; k < cf->ns; k++) {
          cf->mp[i][k + l * cf->ns] +=
              cf->cf[i][k + j * cf->ns] * fac * legpoly(ell, mu);
        }
      }
    }
    /* Save the correlation function multipoles. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_INTEG);
    if (e) return e;
  }
  printf(FMT_DONE);
  return 0;
}
/******************************************************************************
Function `eval_cf_wp`:
Evaluate projected correlation functions given xi(s_perp,pi).
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
/* Integrates xi(s_perp, pi) along the line of sight:
   wp(s_perp) += 2 * xi(s_perp, pi_j) * dpi_j, summed over all pi bins. */
static int eval_cf_wp(const CONF *conf, CF *cf) {
  printf("Compute projected correlation functions ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);
  for (int i = 0; i < cf->ncf; i++) {
    for (int j = 0; j < cf->np; j++) {
      double dpi = cf->pbin[j + 1] - cf->pbin[j];   /* pi bin width */
      for (int k = 0; k < cf->ns; k++) {
        cf->wp[i][k] += 2 * cf->cf[i][k + j * cf->ns] * dpi;
      }
    }
    /* Save the projected correlation functions. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_INTEG);
    if (e) return e;
  }
  printf(FMT_DONE);
  return 0;
}
/*============================================================================*\
Interface for correlation function evaluations
\*============================================================================*/
/******************************************************************************
Function `eval_cf`:
Evaluate correlation functions.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations.
Return:
Zero on success; non-zero on error.
******************************************************************************/
/* Top-level driver: count pairs, then evaluate whichever products
 * (2PCF expressions, multipoles, projected CFs) were requested.
 * Returns zero on success, or the first non-zero error code. */
int eval_cf(const CONF *conf, CF *cf) {
  int status = eval_pairs(conf, cf);
  if (status) return status;
  if (cf->ncf && (status = eval_cf_exp(conf, cf))) return status;
  if (cf->nl && (status = eval_cf_mp(conf, cf))) return status;
  if (cf->comp_wp && (status = eval_cf_wp(conf, cf))) return status;
  return 0;
}
|
GB_unop__lnot_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_uint32_uint32
// op(A') function: GB_unop_tran__lnot_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = !(aij != 0) entrywise, writing results into Cx.
// NOTE: this file is auto-generated (see header); logic must not be hand-edited.
GrB_Info GB_unop_apply__lnot_uint32_uint32
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint32_t z = aij ; // cast (no-op here: uint32 -> uint32)
Cx [p] = !(z != 0) ; // lnot
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // entry not present in the bitmap
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transposed apply: C = lnot (cast (A')).  The kernel body is the shared
// template GB_unop_transpose.c, specialized by the GB_* macros defined in
// this (auto-generated) file.
GrB_Info GB_unop_tran__lnot_uint32_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
shallow_base_openmp_v3.c | /* Code converted from shallow_base.f90 using F2C-ACC program.
* Manually replaced:
* - WRITE statements with printf
* - MOD operator with %
* - system_clock with wtime
* Fixed several of the array references which had x dimension as 1,
* instead of M_LEN.
* Fixed values set using d and e notation.
* (7 June 2011)
***************
* 'Pure' C version developed by G.D Riley (UoM) (25 Jan 2012)
* removed all ftocmacros
* used sin and cos not sinf and cosf (since all data are doubles)
* needed to declare arrays +1 to cope with Fortran indexing
* Compile, e.g.:
* gcc -O2 -fopenmp -o sb shallow_base_openmp_v3.c wtime.c -lm
** NOTE: May need to set 'ulimit -s unlimited' to run large
* problems (e.g. 512x512).
* Results are consistent with Fortran version of the code.
* GDR: July 2013
 * Applied static, chunk scheduling for load balance, as described
* in Michail Pappas' MSc thesis (2012).
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define MIN(x,y) ((x)>(y)?(y):(x))
#define MAX(x,y) ((x)>(y)?(x):(y))
#define TRUE 1
#define FALSE 0
#define M 128
#define N 128
#define M_LEN M + 1
#define N_LEN N + 1
#define ITMAX 2000
#define L_OUT TRUE
extern double wtime();
//===================================================
/* Sum every element of the lenx x leny sub-rectangle of `field`
 * (row-major order) and return the total as a simple checksum. */
double compute_checksum(double field[M_LEN][N_LEN],
int lenx, int leny)
{
  double total = 0.0;

  for (int row = 0; row < lenx; row++)
    for (int col = 0; col < leny; col++)
      total += field[row][col];
  return total;
}
//===================================================
//! Benchmark weather prediction program for comparing the
//! performance of current supercomputers. The model is
//! based on the paper - The Dynamics of Finite-Difference
//! Models of the Shallow-Water Equations, by Robert Sadourny
//! J. Atm. Sciences, Vol 32, No 4, April 1975.
//!
//! Code by Paul N. Swarztrauber, National Center for
//! Atmospheric Research, Boulder, Co, October 1984.
//! Modified by Juliana Rew, NCAR, January 2006
//!
//! In this version, shallow4.f, initial and calculated values
//! of U, V, and P are written to a netCDF file
//! for later use in visualizing the results. The netCDF data
//! management library is freely available from
//! http://www.unidata.ucar.edu/software/netcdf
//! This code is still serial but has been brought up to modern
//! Fortran constructs and uses portable intrinsic Fortran 90 timing routines.
//! This can be compiled on the IBM SP using:
//! xlf90 -qmaxmem=-1 -g -o shallow4 -qfixed=132 -qsclk=micro \
//! -I/usr/local/include shallow4.f -L/usr/local/lib32/r4i4 -l netcdf
//! where the -L and -I point to local installation of netCDF
//!
//! Changes from shallow4.f (Annette Osprey, January 2010):
//! - Converted to free-form fortran 90.
//! - Some tidying up of old commented-out code.
//! - Explicit type declarations.
//! - Variables n, m, ITMAX and mprint read in from namelist.
//! - Dynamic array allocation.
//! - Only write to netcdf at mprint timesteps.
//! - Don't write wrap-around points to NetCDF file.
//! - Use 8-byte reals.
//!
//! Further changes (Annette Osprey & Graham Riley, February 2011):
//! - Remove unnecessary halo updates.
//! - Split loops to improve TLB access.
//! - Modify timers to accumulate loop times over whole run.
//! - Remove old-style indentation.
//!
//! Minimal serial version (26 May 2011)
int main(int argc, char **argv) {
  // solution arrays
  double u[M_LEN][N_LEN],v[M_LEN][N_LEN],p[M_LEN][N_LEN];
  double unew[M_LEN][N_LEN],vnew[M_LEN][N_LEN],pnew[M_LEN][N_LEN];
  double uold[M_LEN][N_LEN],vold[M_LEN][N_LEN],pold[M_LEN][N_LEN];
  double cu[M_LEN][N_LEN],cv[M_LEN][N_LEN],z[M_LEN][N_LEN],h[M_LEN][N_LEN],psi[M_LEN][N_LEN];
  double dt,tdt,dx,dy,a,alpha,el,pi;
  double tpi,di,dj,pcf;
  double tdts8,tdtsdx,tdtsdy,fsdx,fsdy;
  int ncycle;
  int i,j;
  int nthreads, chunk_size;
  // timer variables
  double t100,t200,t300;
  double tstart,ctime,tcyc,time;
  double c1,c2;

  // ** Initialisations **
  nthreads = 1;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#endif
  // static chunk so each thread owns a contiguous band of rows
  chunk_size = (int) ceil( (float)M / (float)nthreads);

  // Note below that two delta t (tdt) is set to dt on the first
  // cycle after which it is reset to dt+dt.
  dt = 90.;
  tdt = dt;
  dx = 100000.;
  dy = 100000.;
  fsdx = 4. / dx;
  fsdy = 4. / dy;
  a = 1000000.;
  alpha = .001;
  el = N * dx;
  // atan (not atanf): all data here are doubles, per the file header note
  pi = 4. * atan(1.);
  tpi = pi + pi;
  di = tpi / M;
  dj = tpi / N;
  pcf = pi * pi * a * a / (el * el);

  // Initial values of the stream function and p
#pragma omp parallel for default (shared) private(i,j)
  for (i=0;i<M_LEN;i++) {
    for (j=0;j<N_LEN;j++) {
      psi[i][j] = a * sin((i + .5) * di) * sin((j + .5) * dj);
      p[i][j] = pcf * (cos(2. * (i) * di) + cos(2. * (j) * dj)) + 50000.;
    }
  }

  // Initialize velocities
#pragma omp parallel for default (shared) private(i,j)
  for (i=0;i<M;i++) {
    for (j=0;j<N;j++) {
      u[i + 1][j] = -(psi[i + 1][j + 1] - psi[i + 1][j]) / dy;
      v[i][j + 1] = (psi[i + 1][j + 1] - psi[i][j + 1]) / dx;
    }
  }

  // Periodic continuation
  for (j=0;j<N;j++) {
    u[0][j] = u[M][j];
    v[M][j + 1] = v[0][j + 1];
  }
  for (i=0;i<M;i++) {
    u[i + 1][N] = u[i + 1][0];
    v[i][0] = v[i][N];
  }
  u[0][N] = u[M][0];
  v[M][0] = v[0][N];

  // 'parallel for' here: the original plain 'parallel' made every thread
  // execute the entire copy loop redundantly (a benign but real data race)
#pragma omp parallel for default (shared) private(i,j)
  for (i=0;i<M_LEN;i++) {
    for (j=0;j<N_LEN;j++) {
      uold[i][j] = u[i][j];
      vold[i][j] = v[i][j];
      pold[i][j] = p[i][j];
    }
  }

  // Print initial values
  if ( L_OUT ) {
    printf(" number of points in the x direction %d\n", N);
    printf(" number of points in the y direction %d\n", M);
    printf(" grid spacing in the x direction %f\n", dx);
    printf(" grid spacing in the y direction %f\n", dy);
    printf(" time step %f\n", dt);
    printf(" time filter parameter %f\n", alpha);
  }

  // Start timer
  tstart = wtime();
  time = 0.;
  t100 = 0.;
  t200 = 0.;
  t300 = 0.;

  // ** Start of time loop **
#pragma omp parallel default (shared) private(i,j,ncycle,tdts8,tdtsdx,tdtsdy) firstprivate(tdt)
  for (ncycle=1;ncycle<=ITMAX;ncycle++) {
    // Compute capital u, capital v, z and h
#pragma omp master
    c1 = wtime();
#pragma omp for schedule (static,chunk_size) nowait
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        cu[i + 1][j] = .5 * (p[i + 1][j] + p[i][j]) * u[i + 1][j];
      }
    }
#pragma omp for schedule (static,chunk_size) nowait
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        cv[i][j + 1] = .5 * (p[i][j + 1] + p[i][j]) * v[i][j + 1];
      }
    }
#pragma omp for schedule (static,chunk_size) nowait
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        z[i + 1][j + 1] = (fsdx * (v[i + 1][j + 1] - v[i][j + 1]) - fsdy * (u[i + 1][j + 1] - u[i + 1][j])) / (p[i][j] + p[i + 1][j] + p[i + 1][j + 1] + p[i][j + 1]);
      }
    }
#pragma omp for schedule (static,chunk_size)
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        h[i][j] = p[i][j] + .25 * (u[i + 1][j] * u[i + 1][j] + u[i][j] * u[i][j] + v[i][j + 1] * v[i][j + 1] + v[i][j] * v[i][j]);
      }
    }
#pragma omp master
    {
      c2 = wtime();
      t100 = t100 + (c2 - c1);
    }
    // Periodic continuation
#pragma omp single
    {
      for (j=0;j<N;j++) {
        cu[0][j] = cu[M][j];
        cv[M][j + 1] = cv[0][j + 1];
        z[0][j + 1] = z[M][j + 1];
        h[M][j] = h[0][j];
      }
      cu[0][N] = cu[M][0];
      cv[M][0] = cv[0][N];
      z[0][0] = z[M][N];
      h[M][N] = h[0][0];
    }
#pragma omp for schedule (static,chunk_size)
    for (i=0;i<M;i++) {
      cu[i + 1][N] = cu[i + 1][0];
      cv[i][0] = cv[i][N];
      z[i + 1][0] = z[i + 1][N];
      h[i][N] = h[i][0];
    }

    // Compute new values u,v and p
    tdts8 = tdt / 8.;
    tdtsdx = tdt / dx;
    tdtsdy = tdt / dy;
#pragma omp master
    c1 = wtime();
#pragma omp for schedule (static,chunk_size) nowait
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        unew[i + 1][j] = uold[i + 1][j] + tdts8 * (z[i + 1][j + 1] + z[i + 1][j]) * (cv[i + 1][j + 1] + cv[i][j + 1] + cv[i][j] + cv[i + 1][j]) - tdtsdx * (h[i + 1][j] - h[i][j]);
      }
    }
#pragma omp for schedule (static,chunk_size) nowait
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        vnew[i][j + 1] = vold[i][j + 1] - tdts8 * (z[i + 1][j + 1] + z[i][j + 1]) * (cu[i + 1][j + 1] + cu[i][j + 1] + cu[i][j] + cu[i + 1][j]) - tdtsdy * (h[i][j + 1] - h[i][j]);
      }
    }
#pragma omp for schedule (static,chunk_size)
    for (i=0;i<M;i++) {
      for (j=0;j<N;j++) {
        pnew[i][j] = pold[i][j] - tdtsdx * (cu[i + 1][j] - cu[i][j]) - tdtsdy * (cv[i][j + 1] - cv[i][j]);
      }
    }
#pragma omp master
    {
      c2 = wtime();
      t200 = t200 + (c2 - c1);
    }
    // Periodic continuation
#pragma omp single
    {
      for (j=0;j<N;j++) {
        unew[0][j] = unew[M][j];
        vnew[M][j + 1] = vnew[0][j + 1];
        pnew[M][j] = pnew[0][j];
      }
      unew[0][N] = unew[M][0];
      vnew[M][0] = vnew[0][N];
      pnew[M][N] = pnew[0][0];
    }
#pragma omp for schedule (static,chunk_size)
    for (i=0;i<M;i++) {
      unew[i + 1][N] = unew[i + 1][0];
      vnew[i][0] = vnew[i][N];
      pnew[i][N] = pnew[i][0];
    }
#pragma omp master
    time = time + dt;

    // Time smoothing and update for next cycle
    if ( ncycle > 1 ) {
#pragma omp master
      c1 = wtime();
#pragma omp for schedule (static,chunk_size) nowait
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          uold[i][j] = u[i][j] + alpha * (unew[i][j] - 2. * u[i][j] + uold[i][j]);
        }
      }
#pragma omp for schedule (static,chunk_size) nowait
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          vold[i][j] = v[i][j] + alpha * (vnew[i][j] - 2. * v[i][j] + vold[i][j]);
        }
      }
#pragma omp for schedule (static,chunk_size) nowait
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          pold[i][j] = p[i][j] + alpha * (pnew[i][j] - 2. * p[i][j] + pold[i][j]);
        }
      }
#pragma omp for schedule (static,chunk_size) nowait
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          u[i][j] = unew[i][j];
        }
      }
#pragma omp for schedule (static,chunk_size) nowait
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          v[i][j] = vnew[i][j];
        }
      }
#pragma omp for schedule (static,chunk_size)
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          p[i][j] = pnew[i][j];
        }
      }
#pragma omp master
      {
        c2 = wtime();
        t300 = t300 + (c2 - c1);
      }
    } else {
      // first cycle: tdt becomes 2*dt (tdt is firstprivate, so every
      // thread doubles its own copy consistently)
      tdt = tdt + tdt;
#pragma omp for schedule (static,chunk_size)
      for (i=0;i<M_LEN;i++) {
        for (j=0;j<N_LEN;j++) {
          uold[i][j] = u[i][j];
          vold[i][j] = v[i][j];
          pold[i][j] = p[i][j];
          u[i][j] = unew[i][j];
          v[i][j] = vnew[i][j];
          p[i][j] = pnew[i][j];
        }
      }
    }
  }
  // ** End of time loop **

  fprintf(stdout, "P CHECKSUM after %d steps = %15.7e\n",
          ITMAX, compute_checksum(pnew,M_LEN,N_LEN));
  fprintf(stdout, "U CHECKSUM after %d steps = %15.7e\n",
          ITMAX, compute_checksum(unew,M_LEN,N_LEN));
  fprintf(stdout, "V CHECKSUM after %d steps = %15.7e\n",
          ITMAX, compute_checksum(vnew,M_LEN,N_LEN));

  // Output p, u, v fields and run times.
  if (L_OUT) {
    c2 = wtime();
    ctime = c2 - tstart;
    tcyc = ctime / ITMAX;
    fprintf(stdout,"\n");
    fprintf(stdout," Job run on %d threads with a chunk size of %d\n",
            nthreads, chunk_size);
    fprintf(stdout," No. of steps = %d, total time = %f, time per cycle = %f (s)\n",
            ITMAX, ctime, tcyc);
    fprintf(stdout," time for c{u,v},z,h calc = %.6f s\n", t100);
    fprintf(stdout," time for {u,v,p}new calc = %.6f s\n", t200);
    fprintf(stdout," time for time-smoothing = %.6f s\n", t300);
  }
  return(0);
}
|
hsrp_fmt_plug.c | /*
* Cracker for MD5 authentication in HSRP, HSRPv2, VRRP, and GLBP.
* http://www.rfc-editor.org/rfc/rfc1828.txt
*
* This is dedicated to Darya. You inspire me.
*
* This software is Copyright (c) 2014, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* optimized Feb 2016, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hsrp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hsrp);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE tuned on core i7 4-core HT
// 2048 - 8850k 6679k
// 4096 - 10642k 7278k
// 8192 - 10489k 7532k
// 16k - 10413k 7694k
// 32k - 12111k 7803k ** this value chosen
// 64k - 12420k 6523k
// 128k - 12220k 6741k
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768
#endif
#endif
#endif
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "hsrp"
#define FORMAT_NAME "\"MD5 authentication\" HSRP, HSRPv2, VRRP, GLBP"
#define FORMAT_TAG "$hsrp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 55 // Must fit in a single MD5 block
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define REAL_SALT_SIZE 50
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$hsrp$000004030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$52e1db09d18d695b8fefb3730ff8d9d6", "password12345"},
{"$hsrp$000004030a5a01000000000000000000ac102801041c01000000ac1028140000000000000000000000000000000000000000$f15dfa631a0679e0801f8e6b0c0c17ac", "openwall"},
{"$hsrp$000010030a64010000000000000000000a000064041c010000000a0000140000000000000000000000000000000000000000$f02fc41b1b516e2d1261d8800d39ccea", "openwall12345"},
/* HSRPv2 hashes */
{"$hsrp$0128020006040001aabbcc000a000000006400000bb8000027100a000064000000000000000000000000041c010000000a00000a0000000000000000000000000000000000000000$642fedafe1f374bd2fdd8f1ba81d87a2", "password"},
{"$hsrp$0128020006040001aabbcc001400000000c800000bb8000027100a000064000000000000000000000000041c010000000a0000140000000000000000000000000000000000000000$0481257f0fe583b275f03a48e88de72f", "password12345"},
{NULL}
};
static char (*saved_key)[64]; // 1 full limb of MD5, we do out work IN this buffer.
static MD5_CTX (*saved_ctx);
static int *saved_len, dirty;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
int length;
unsigned char salt[2048]; // be safe ;)
} *cur_salt;
/* One-time format setup: scale the per-crypt key counts for OpenMP and
 * allocate the per-candidate buffers released later by done(). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* omp_get_num_threads() returns 1 outside a parallel region, so the
	 * original call never scaled the buffers; omp_get_max_threads() gives
	 * the thread count the parallel crypt_all() loop will actually use. */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_ctx));
}
/* Release every per-candidate buffer allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_ctx);
}
/* Syntax check for a candidate hash line of the form
 * "$hsrp$<lowercase hex salt>$<32 lowercase hex digit MD5>".
 * Returns 1 if the line is usable by this format, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q = NULL;
int len;
p = ciphertext;
if (strncmp(p, FORMAT_TAG, TAG_LENGTH))
return 0;
p += TAG_LENGTH; /* p: first hex digit of the salt */
q = strrchr(ciphertext, '$');
if (!q || q+1==p)
return 0;
q = q + 1; /* q: first hex digit of the MD5 hash */
// if ((q - p - 1) > REAL_SALT_SIZE * 2)
// return 0;
len = strspn(q, HEXCHARS_lc);
/* hash must be exactly 32 hex digits with nothing after it */
if (len != BINARY_SIZE * 2 || len != strlen(q))
return 0;
/* salt must be all lowercase hex up to the '$' separator */
if (strspn(p, HEXCHARS_lc) != q - p - 1)
return 0;
/* salt hex must fit the fixed salt buffer */
if (q-p > (sizeof(cur_salt->salt)-1)*2)
return 0;
return 1;
}
/* Decode the hex salt portion of a (pre-validated) ciphertext into a
 * static custom_salt and return it.  The result is only valid until the
 * next call. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	/* everything before the final '$' is the hex-encoded salt */
	int nbytes = (strrchr(ciphertext, '$') - ciphertext) / 2;
	for (int k = 0; k < nbytes; k++) {
		cs.salt[k] = (atoi16[ARCH_INDEX(ciphertext[2 * k])] << 4) |
		             atoi16[ARCH_INDEX(ciphertext[2 * k + 1])];
	}
	cs.length = nbytes;
	return &cs;
}
/* Decode the 32 hex digits after the last '$' into the 16-byte raw MD5.
 * Returns a pointer to a static, suitably aligned buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	char *hex = strrchr(ciphertext, '$') + 1;

	for (int k = 0; k < BINARY_SIZE; k++, hex += 2)
		buf.c[k] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		           atoi16[ARCH_INDEX(hex[1])];
	return buf.c;
}
/* Install the salt the cracker loop selected; crypt_all() reads it via
 * the cur_salt global. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
#define PUTCHAR(buf, index, val) ((unsigned char*)(buf))[index] = (val)
/* Compute the MD5 digests for all `count` candidates against cur_salt.
 * Each saved_key buffer is a full 64-byte MD5 input block; while `dirty`
 * is set, that block (key + 0x80 terminator + bit length) is hashed once
 * and the resulting MD5 state cached in saved_ctx.  Subsequent salts
 * reuse the cached state and only append the salt and the key again. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
MD5_CTX ctx;
int len = saved_len[index];
if (dirty) {
// we use the saved_key buffer in-line.
unsigned int *block = (unsigned int*)saved_key[index];
MD5_Init(&saved_ctx[index]);
// set bit
saved_key[index][len] = 0x80;
block[14] = len << 3; /* MD5 length field: key length in bits */
#if (ARCH_LITTLE_ENDIAN==0)
block[14] = JOHNSWAP(block[14]);
#endif
MD5_Update(&saved_ctx[index], (unsigned char*)block, 64);
// clear the bit, so that get_key returns proper key.
saved_key[index][len] = 0;
}
memcpy(&ctx, &saved_ctx[index], sizeof(MD5_CTX));
// data
MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
// key (again)
MD5_Update(&ctx, saved_key[index], len);
MD5_Final((unsigned char*)crypt_out[index], &ctx);
}
dirty = 0; /* cached contexts now match the current keys */
return count;
}
/* Return 1 if any computed digest matches the target's first 32 bits.
 * Without _OPENMP the loop is compiled out and only index 0 is checked —
 * presumably relying on MAX_KEYS_PER_CRYPT == 1 (so count == 1) in
 * non-OpenMP builds; confirm if that invariant ever changes. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (((uint32_t*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
/* Full 16-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* crypt_out holds the complete MD5, so cmp_one is already exact. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store a candidate key into the in-place MD5 block buffer.  Any tail
 * bytes left over from a previous, longer key are zeroed so the 64-byte
 * block hashed by crypt_all() stays clean, and `dirty` flags the cached
 * MD5 contexts for rebuild. */
static void hsrp_set_key(char *key, int index)
{
	const int prev = saved_len[index];
	const int cur = strlen(key);

	memcpy(saved_key[index], key, cur + 1); /* includes the NUL */
	if (prev > cur)
		memset(saved_key[index] + cur + 1, 0, prev - cur - 1);
	saved_len[index] = cur;
	dirty = 1;
}
/* Return the current plaintext candidate at `index` (NUL-terminated;
 * crypt_all() clears its 0x80 padding byte after hashing). */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor wiring the methods above into John's format table;
 * field order follows struct fmt_main in formats.h. */
struct fmt_main fmt_hsrp = {
{ /* params */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
tests
}, { /* methods */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
hsrp_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
/*
  Backing store for a large matrix cache: elements live in heap memory,
  an anonymous mapping, a file-backed mapping, or a plain file on disk,
  depending on which resources AcquireMatrixInfo() could obtain.
*/
struct _MatrixInfo
{
CacheType
type; /* MemoryCache, MapCache, or DiskCache */
size_t
columns, /* matrix width in elements */
rows, /* matrix height in elements */
stride; /* bytes per element */
MagickSizeType
length; /* total bytes: columns*rows*stride */
MagickBooleanType
mapped, /* elements came from MapBlob(), not the heap */
synchronize; /* honor MAGICK_SYNCHRONIZE for disk preallocation */
char
path[MagickPathExtent]; /* scratch file path for disk/map caches */
int
file; /* scratch file descriptor, or -1 */
void
*elements; /* element storage when memory- or map-backed */
SemaphoreInfo
*semaphore; /* serializes lseek+write when pwrite() is absent */
size_t
signature; /* MagickCoreSignature sanity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* SIGBUS handler installed by SetMatrixExtent(): a bus error while
 * touching a file-backed mapping (presumably the disk could not supply
 * the page) is converted into a fatal cache exception. */
static void MatrixSignalHandler(int status)
{
magick_unreferenced(status);
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/* Write `length` bytes from `buffer` into the matrix scratch file at byte
 * `offset`.  Returns the number of bytes written (may be short if a
 * non-EINTR error stops the loop), or -1 if the initial seek fails. */
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
/* lseek+write is not atomic: hold the semaphore so concurrent writers
 * cannot interleave their seeks with ours. */
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
/* retry loop: advance by however much each write() accepted */
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) MAGICK_SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR) /* EINTR: restart; anything else: give up */
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
/* Grow the scratch file to at least `length` bytes by writing one byte at
 * length-1 (a sparse extension).  Returns MagickTrue on success or when
 * the file is already large enough; MagickFalse otherwise. */
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
/* reject lengths that do not fit in a signed file offset */
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
/* optionally back the sparse range with real blocks */
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
/* a later SIGBUS on the mapping becomes a fatal cache exception */
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
const size_t rows,const size_t stride,ExceptionInfo *exception)
{
char
*synchronize;
MagickBooleanType
status;
MatrixInfo
*matrix_info;
matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
if (matrix_info == (MatrixInfo *) NULL)
return((MatrixInfo *) NULL);
(void) memset(matrix_info,0,sizeof(*matrix_info));
matrix_info->signature=MagickCoreSignature;
matrix_info->columns=columns;
matrix_info->rows=rows;
matrix_info->stride=stride;
matrix_info->semaphore=AcquireSemaphoreInfo();
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
matrix_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
/* Overflow guard: columns*rows*stride must round-trip through length.
 * NOTE(review): rows == 0 or stride == 0 would divide by zero below —
 * confirm callers guarantee non-zero dimensions. */
matrix_info->length=(MagickSizeType) columns*rows*stride;
if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
/* Backing-store preference: heap, anonymous map, then disk file
 * (memory-mapped when the map resource is available). */
matrix_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,matrix_info->length);
if ((status != MagickFalse) &&
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
{
status=AcquireMagickResource(MemoryResource,matrix_info->length);
if (status != MagickFalse)
{
matrix_info->mapped=MagickFalse;
matrix_info->elements=AcquireMagickMemory((size_t)
matrix_info->length);
if (matrix_info->elements == NULL)
{
/* heap allocation failed; try an anonymous mapping */
matrix_info->mapped=MagickTrue;
matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
matrix_info->length);
}
if (matrix_info->elements == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,matrix_info->length);
}
}
matrix_info->file=(-1);
if (matrix_info->elements == (unsigned short *) NULL)
{
/* both memory paths failed: spill to a unique scratch file */
status=AcquireMagickResource(DiskResource,matrix_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=DiskCache;
matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
if (matrix_info->file == -1)
return(DestroyMatrixInfo(matrix_info));
status=AcquireMagickResource(MapResource,matrix_info->length);
if (status != MagickFalse)
{
status=SetMatrixExtent(matrix_info,matrix_info->length);
if (status != MagickFalse)
matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
(size_t) matrix_info->length);
if (matrix_info->elements != NULL)
matrix_info->type=MapCache;
else
RelinquishMagickResource(MapResource,matrix_info->length);
}
}
return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This used to generate the two dimensional matrix, and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  /*
    Allocate a number_rows x size matrix as an array of row pointers, with
    every element initialized to zero.  Returns NULL on allocation failure;
    any rows already acquired are released before failing.
  */
  double
    **matrix;

  ssize_t
    r;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    ssize_t
      c;

    matrix[r]=(double *) AcquireQuantumMemory(size,sizeof(**matrix));
    if (matrix[r] == (double *) NULL)
      {
        /*
          Partial failure: unwind the rows acquired so far, then the spine.
        */
        while (--r >= 0)
          matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
        return((double **) RelinquishMagickMemory(matrix));
      }
    for (c=0; c < (ssize_t) size; c++)
      matrix[r][c]=0.0;
  }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  /*
    Release all storage associated with the matrix: heap memory, memory
    mapping, and/or backing disk file, depending on the cache type, then the
    semaphore and the MatrixInfo structure itself.  Always returns NULL.
  */
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      /* Elements live either on the heap or in an anonymous mapping. */
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* fallthrough: a map cache is backed by a disk file, so the DiskCache
         cleanup (close file, remove it, release disk resource) applies too */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  RelinquishSemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augumented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix argumenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number terms that need to be solved.
%
% o number_vectors: Number of vectors columns, argumenting the above matrix.
% Usally 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles. when only one set of simultanious equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifing more 'columns' (as an 'array of vector columns',
% you can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordients, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
/*
  Swap two doubles without a temporary (sum/difference trick).
*/
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  /*
    Reduce 'matrix' to reduced row echelon form in place, applying the same
    row operations to the 'vectors' columns, thereby solving the system.
    Returns MagickFalse if the matrix is singular or on allocation failure.
  */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Full pivoting: locate the largest magnitude element among rows and
      columns that have not been pivoted yet.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /*
                      Duplicate pivot: free workspace before failing (the
                      previous implementation leaked these buffers here).
                    */
                    pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                    rows=(ssize_t *) RelinquishMagickMemory(rows);
                    columns=(ssize_t *) RelinquishMagickMemory(columns);
                    return(MagickFalse);
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* Move the pivot onto the diagonal. */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /*
          Singularity: free workspace before failing (the previous
          implementation leaked these buffers here).
        */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
    /* Normalize the pivot row. */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    /* Eliminate the pivot column from every other row. */
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /* Undo the column permutations introduced by full pivoting. */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  /*
    Accessor: the number of columns in the matrix.
  */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specifed element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp an x-offset into the valid column range [0, columns-1],
    replicating the edge for out-of-bounds requests.
  */
  const ssize_t
    last=(ssize_t) (columns-1);

  return(x < 0L ? 0L : (x >= (ssize_t) columns ? last : x));
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp a y-offset into the valid row range [0, rows-1], replicating the
    edge for out-of-bounds requests.
  */
  const ssize_t
    last=(ssize_t) (rows-1);

  return(y < 0L ? 0L : (y >= (ssize_t) rows ? last : y));
}
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  /*
    Read 'length' bytes of the disk-backed matrix at byte 'offset' into
    'buffer'.  Returns the number of bytes actually read (which is less than
    'length' on a persistent read error), or -1 if the initial seek fails.
  */
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /*
    Without pread(), the seek+read pair must be serialized so concurrent
    readers do not interleave on the shared file offset.
  */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  /* Loop to handle short reads; each pass requests at most SSIZE_MAX. */
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) MAGICK_SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        /* Retry when interrupted by a signal; give up on any other error. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  /*
    Copy the element at (x,y) into 'value' (stride bytes).  Out-of-bounds
    coordinates are clamped to the nearest edge element.  Returns MagickFalse
    only if a disk-cache read comes up short.
  */
  MagickOffsetType
    element_offset,
    read_count;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  element_offset=(MagickOffsetType) EdgeY(y,matrix_info->rows)*
    matrix_info->columns+EdgeX(x,matrix_info->columns);
  if (matrix_info->type == DiskCache)
    {
      read_count=ReadMatrixElements(matrix_info,element_offset*
        matrix_info->stride,matrix_info->stride,(unsigned char *) value);
      return(read_count == (MagickOffsetType) matrix_info->stride ?
        MagickTrue : MagickFalse);
    }
  /* Memory or map cache: elements are directly addressable. */
  (void) memcpy(value,(unsigned char *) matrix_info->elements+element_offset*
    matrix_info->stride,matrix_info->stride);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  /*
    Accessor: the number of rows in the matrix.
  */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficent
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficents.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  /*
    Accumulate one observation into the normal equations: each matrix cell
    (u,v) gains terms[u]*terms[v], and each result vector u gains
    results[u]*terms[v] for every term v.
  */
  ssize_t
    u,
    v;

  for (u=0; u < (ssize_t) rank; u++)
    for (v=0; v < (ssize_t) rank; v++)
      matrix[u][v]+=terms[u]*terms[v];
  for (u=0; u < (ssize_t) number_vectors; u++)
    for (v=0; v < (ssize_t) rank; v++)
      vectors[u][v]+=results[u]*terms[v];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  /*
    Render the matrix as a grayscale image, linearly scaling element values
    into the quantum range.  Elements must be doubles (stride >= sizeof
    double) or NULL is returned.
  */
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&min_value);
  max_value=min_value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      double
        value;

      /* Elements that cannot be read are simply excluded from the range. */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Choose the scale: all-zero matrix maps to black; a constant non-zero
    matrix maps to QuantumRange; otherwise scale [min,max] onto the full
    quantum range.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL,exception);
  /* NOTE(review): the AcquireImage result is dereferenced without a NULL
     check — confirm AcquireImage cannot return NULL here. */
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    Quantum
      *q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* NOTE(review): on a failed read 'q' is not advanced, so subsequent
         pixels in this row would shift left — verify this is intended. */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      *q=ClampToQuantum(value);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the memset method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  /*
    Set every element of the matrix to zero.  For memory- and map-backed
    caches this is a single memset; for disk-backed caches the file is
    rewritten from the start.  Returns MagickFalse if a disk write fails.
  */
  MagickOffsetType
    i;

  ssize_t
    count;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Write exactly 'length' zero bytes.  The previous implementation nested a
    0..length loop inside a 0..rows loop, writing rows*length bytes and
    growing the cache file past its intended size.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (i=0; i < (MagickOffsetType) matrix_info->length; i++)
  {
    count=write(matrix_info->file,&value,sizeof(value));
    if (count != (ssize_t) sizeof(value))
      break;
  }
  return(i < (MagickOffsetType) matrix_info->length ? MagickFalse :
    MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  /*
    Free a matrix allocated by AcquireMagickMatrix(): each row, then the
    array of row pointers.  Safe to call with NULL; always returns NULL.
  */
  ssize_t
    r;

  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specifed element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  /*
    Store 'value' (stride bytes) at element (x,y).  Unlike GetMatrixElement,
    out-of-bounds coordinates are rejected with MagickFalse rather than
    clamped.
  */
  MagickOffsetType
    element_offset,
    written;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  element_offset=(MagickOffsetType) y*matrix_info->columns+x;
  if ((element_offset < 0) ||
      ((MagickSizeType) (element_offset*matrix_info->stride) >=
       matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type == DiskCache)
    {
      written=WriteMatrixElements(matrix_info,element_offset*
        matrix_info->stride,matrix_info->stride,(unsigned char *) value);
      return(written == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  /* Memory or map cache: elements are directly addressable. */
  (void) memcpy((unsigned char *) matrix_info->elements+element_offset*
    matrix_info->stride,value,matrix_info->stride);
  return(MagickTrue);
}
|
a7a.c | #include <stdio.h>
#define N 100000000
int a[N],b[N];
long long s=0;
int
main(void)
{
  /*
    OpenMP demo: add vector a into b in parallel, then sum b in parallel.
  */
  int i;

  /* Initialization, not in parallel. */
  for (i=0; i < N; i++)
  {
    a[i]=1;
    b[i]=2;
  }
#pragma omp parallel for
  for (i=0; i < N; i++)
    b[i] += a[i];
  /* The loop index is private inside the parallel for, so its value after
     the loop cannot be relied on; use N-1 explicitly. */
  printf("Valor i %d, de b[i] %d \n",N-1,b[N-1]);
  /* reduction(+:s) gives each thread a private partial sum, avoiding one
     atomic operation per iteration; %lld matches the long long type of s
     (the old %ld was undefined behavior). */
#pragma omp parallel for reduction(+:s)
  for (i=0; i < N; i++)
    s+=b[i];
  printf("Valor %d, de b %d suma total: %lld\n",N-1,b[N-1],s);
  return 0;
}
|
GB_emult_02.c | //------------------------------------------------------------------------------
// GB_emult_02: C = A.*B where A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C = A.*B where A is sparse/hyper and B is bitmap/full constructs C with
// the same sparsity structure as A. This method can also be called with
// the two input matrices swapped, with flipxy true, to handle the case
// where A is bitmap/full and B is sparse/hyper.
// When no mask is present, or the mask is applied later, this method handles
// the following cases:
// ------------------------------------------
// C = A .* B
// ------------------------------------------
// sparse . sparse bitmap
// sparse . sparse full
// sparse . bitmap sparse
// sparse . full sparse
// If M is sparse/hyper and complemented, it is not passed here:
// ------------------------------------------
// C <!M>= A .* B
// ------------------------------------------
// sparse sparse sparse bitmap (mask later)
// sparse sparse sparse full (mask later)
// sparse sparse bitmap sparse (mask later)
// sparse sparse full sparse (mask later)
// If M is present, it is bitmap/full:
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// sparse bitmap sparse bitmap
// sparse bitmap sparse full
// sparse bitmap bitmap sparse
// sparse bitmap full sparse
// ------------------------------------------
// C <M> = A .* B
// ------------------------------------------
// sparse full sparse bitmap
// sparse full sparse full
// sparse full bitmap sparse
// sparse full full sparse
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// sparse bitmap sparse bitmap
// sparse bitmap sparse full
// sparse bitmap bitmap sparse
// sparse bitmap full sparse
// ------------------------------------------
// C <!M> = A .* B
// ------------------------------------------
// sparse full sparse bitmap
// sparse full sparse full
// sparse full bitmap sparse
// sparse full full sparse
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCUDA_DEV
#include "GB_binop__include.h"
#endif
// Release the per-task counter workspace and the slicing of A's entries.
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
}
// Release everything, including the output matrix C, on an error path.
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (C) ; \
}
GrB_Info GB_emult_02 // C=A.*B when A is sparse/hyper, B bitmap/full
(
GrB_Matrix C, // output matrix, static header
const GrB_Type ctype, // type of output matrix C
const bool C_is_csc, // format of output matrix C
const GrB_Matrix M, // optional mask, unused if NULL
const bool Mask_struct, // if true, use the only structure of M
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input A matrix (sparse/hyper)
const GrB_Matrix B, // input B matrix (bitmap/full)
GrB_BinaryOp op, // op to perform C = op (A,B)
bool flipxy, // if true use fmult(y,x) else fmult(x,y)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL && (C->static_header || GBNSTATIC)) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for emult_02", GB0) ;
ASSERT_MATRIX_OK (A, "A for emult_02", GB0) ;
ASSERT_MATRIX_OK (B, "B for emult_02", GB0) ;
ASSERT_BINARYOP_OK (op, "op for emult_02", GB0) ;
ASSERT_TYPE_OK (ctype, "ctype for emult_02", GB0) ;
ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;
ASSERT (M == NULL || GB_IS_BITMAP (B) || GB_IS_FULL (B)) ;
int C_sparsity = GB_sparsity (A) ;
if (M == NULL)
{
GBURBLE ("emult_02:(%s=%s.*%s)",
GB_sparsity_char (C_sparsity),
GB_sparsity_char_matrix (A),
GB_sparsity_char_matrix (B)) ;
}
else
{
GBURBLE ("emult_02:(%s<%s%s%s>=%s.*%s) ",
GB_sparsity_char (C_sparsity),
Mask_comp ? "!" : "",
GB_sparsity_char_matrix (M),
Mask_struct ? ",struct" : "",
GB_sparsity_char_matrix (A),
GB_sparsity_char_matrix (B)) ;
}
//--------------------------------------------------------------------------
// revise the operator to handle flipxy
//--------------------------------------------------------------------------
// Replace the ANY operator with SECOND. ANY and SECOND give the same
// result if flipxy is false. However, SECOND is changed to FIRST if
// flipxy is true. This ensures that the results do not depend on the
// sparsity structures of A and B.
if (op->opcode == GB_ANY_binop_code)
{
switch (op->xtype->code)
{
case GB_BOOL_code : op = GrB_SECOND_BOOL ; break ;
case GB_INT8_code : op = GrB_SECOND_INT8 ; break ;
case GB_INT16_code : op = GrB_SECOND_INT16 ; break ;
case GB_INT32_code : op = GrB_SECOND_INT32 ; break ;
case GB_INT64_code : op = GrB_SECOND_INT64 ; break ;
case GB_UINT8_code : op = GrB_SECOND_UINT8 ; break ;
case GB_UINT16_code : op = GrB_SECOND_UINT16 ; break ;
case GB_UINT32_code : op = GrB_SECOND_UINT32 ; break ;
case GB_UINT64_code : op = GrB_SECOND_UINT64 ; break ;
case GB_FP32_code : op = GrB_SECOND_FP32 ; break ;
case GB_FP64_code : op = GrB_SECOND_FP64 ; break ;
case GB_FC32_code : op = GxB_SECOND_FC32 ; break ;
case GB_FC64_code : op = GxB_SECOND_FC64 ; break ;
default: ;
}
}
if (flipxy)
{
bool handled ;
op = GB_flip_op (op, &handled) ;
if (handled) flipxy = false ;
}
ASSERT_BINARYOP_OK (op, "final op for emult_02", GB0) ;
//--------------------------------------------------------------------------
// declare workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
int64_t *restrict Wfirst = NULL ;
int64_t *restrict Wlast = NULL ;
int64_t *restrict Cp_kfirst = NULL ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
//--------------------------------------------------------------------------
// get M, A, and B
//--------------------------------------------------------------------------
const int8_t *restrict Mb = (M == NULL) ? NULL : M->b ;
const GB_void *restrict Mx = (M == NULL || Mask_struct) ? NULL :
(const GB_void *) M->x ;
const size_t msize = (M == NULL) ? 0 : M->type->size ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const int64_t vlen = A->vlen ;
const int64_t vdim = A->vdim ;
const int64_t nvec = A->nvec ;
const int64_t anz = GB_nnz (A) ;
const int8_t *restrict Bb = B->b ;
const bool B_is_bitmap = GB_IS_BITMAP (B) ;
//--------------------------------------------------------------------------
// check if C is iso and compute its iso value if it is
//--------------------------------------------------------------------------
const size_t csize = ctype->size ;
GB_void cscalar [GB_VLA(csize)] ;
bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;
//--------------------------------------------------------------------------
// allocate C->p and C->h
//--------------------------------------------------------------------------
GB_OK (GB_new (&C, // sparse or hyper (same as A), existing header
ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
C_sparsity, A->hyper_switch, nvec, Context)) ;
int64_t *restrict Cp = C->p ;
//--------------------------------------------------------------------------
// slice the input matrix A
//--------------------------------------------------------------------------
int A_nthreads, A_ntasks ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
GB_SLICE_MATRIX (A, 8, chunk) ;
//--------------------------------------------------------------------------
// count entries in C
//--------------------------------------------------------------------------
C->nvec_nonempty = A->nvec_nonempty ;
C->nvec = nvec ;
const bool C_has_pattern_of_A = !B_is_bitmap && (M == NULL) ;
if (!C_has_pattern_of_A)
{
//----------------------------------------------------------------------
// allocate workspace
//----------------------------------------------------------------------
GB_WERK_PUSH (Work, 3*A_ntasks, int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
Wfirst = Work ;
Wlast = Work + A_ntasks ;
Cp_kfirst = Work + A_ntasks * 2 ;
//----------------------------------------------------------------------
// count entries in C
//----------------------------------------------------------------------
// This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).
if (M == NULL)
{
//------------------------------------------------------------------
// Method2(a): C = A.*B where A is sparse/hyper and B is bitmap
//------------------------------------------------------------------
ASSERT (B_is_bitmap) ;
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
Wfirst [tid] = 0 ;
Wlast [tid] = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// count the entries in C(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pB_start = j * vlen ;
int64_t pA, pA_end ;
GB_get_pA (&pA, &pA_end, tid, k,
kfirst, klast, pstart_Aslice, Ap, vlen) ;
int64_t cjnz = 0 ;
for ( ; pA < pA_end ; pA++)
{
cjnz += Bb [pB_start + Ai [pA]] ;
}
if (k == kfirst)
{
Wfirst [tid] = cjnz ;
}
else if (k == klast)
{
Wlast [tid] = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
}
}
}
else
{
//------------------------------------------------------------------
// Method2(c): C<#M> = A.*B; M, B bitmap/full, A is sparse/hyper
//------------------------------------------------------------------
ASSERT (M != NULL) ;
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
Wfirst [tid] = 0 ;
Wlast [tid] = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// count the entries in C(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pB_start = j * vlen ;
int64_t pA, pA_end ;
GB_get_pA (&pA, &pA_end, tid, k,
kfirst, klast, pstart_Aslice, Ap, vlen) ;
int64_t cjnz = 0 ;
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t pB = pB_start + i ;
bool mij = GBB (Mb, pB) && GB_mcast (Mx, pB, msize) ;
mij = mij ^ Mask_comp ;
cjnz += (mij && GBB (Bb, pB)) ;
}
if (k == kfirst)
{
Wfirst [tid] = cjnz ;
}
else if (k == klast)
{
Wlast [tid] = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
}
}
}
//----------------------------------------------------------------------
// finalize Cp, cumulative sum of Cp and compute Cp_kfirst
//----------------------------------------------------------------------
GB_ek_slice_merge1 (Cp, Wfirst, Wlast, A_ek_slicing, A_ntasks) ;
GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
Wfirst, Wlast, A_ek_slicing, A_ntasks, A_nthreads, Context) ;
}
//--------------------------------------------------------------------------
// allocate C->i and C->x
//--------------------------------------------------------------------------
int64_t cnz = (C_has_pattern_of_A) ? anz : Cp [nvec] ;
// set C->iso = C_iso OK
GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;
//--------------------------------------------------------------------------
// copy pattern into C
//--------------------------------------------------------------------------
// TODO: could make these components of C shallow instead of memcpy
if (GB_IS_HYPERSPARSE (A))
{
// copy A->h into C->h
GB_memcpy (C->h, Ah, nvec * sizeof (int64_t), A_nthreads) ;
}
if (C_has_pattern_of_A)
{
// Method2(b): B is full and no mask present, so the pattern of C is
// the same as the pattern of A
GB_memcpy (Cp, Ap, (nvec+1) * sizeof (int64_t), A_nthreads) ;
GB_memcpy (C->i, Ai, cnz * sizeof (int64_t), A_nthreads) ;
}
C->jumbled = A->jumbled ;
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// get the opcode
//--------------------------------------------------------------------------
// if flipxy was true on input and the op is positional, FIRST, SECOND, or
// PAIR, the op has already been flipped, so these tests do not have to
// consider that case.
GB_Opcode opcode = op->opcode ;
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
bool op_is_first = (opcode == GB_FIRST_binop_code) ;
bool op_is_second = (opcode == GB_SECOND_binop_code) ;
bool op_is_pair = (opcode == GB_PAIR_binop_code) ;
GB_Type_code ccode = ctype->code ;
//--------------------------------------------------------------------------
// check if the values of A and/or B are ignored
//--------------------------------------------------------------------------
// With C = ewisemult (A,B), only the intersection of A and B is used.
// If op is SECOND or PAIR, the values of A are never accessed.
// If op is FIRST or PAIR, the values of B are never accessed.
// If op is PAIR, the values of A and B are never accessed.
// Contrast with ewiseadd.
// A is passed as x, and B as y, in z = op(x,y)
bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
bool B_is_pattern = op_is_first || op_is_pair || op_is_positional ;
//--------------------------------------------------------------------------
// using a built-in binary operator (except for positional operators)
//--------------------------------------------------------------------------
#define GB_PHASE_2_OF_2
bool done = false ;
if (C_iso)
{
//----------------------------------------------------------------------
// C is iso
//----------------------------------------------------------------------
// Cx [0] = cscalar = op (A,B)
GB_BURBLE_MATRIX (C, "(iso emult) ") ;
memcpy (C->x, cscalar, csize) ;
// pattern of C = set intersection of pattern of A and B
// flipxy is ignored since the operator is not applied
#define GB_ISO_EMULT
#include "GB_emult_02_template.c"
done = true ;
}
else
{
#ifndef GBCUDA_DEV
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_AemultB_02(mult,xname) GB (_AemultB_02_ ## mult ## xname)
#define GB_BINOP_WORKER(mult,xname) \
{ \
info = GB_AemultB_02(mult,xname) (C, \
M, Mask_struct, Mask_comp, A, B, flipxy, \
Cp_kfirst, A_ek_slicing, A_ntasks, A_nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
GB_Type_code xcode, ycode, zcode ;
if (!op_is_positional &&
GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
{
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
#endif
}
//--------------------------------------------------------------------------
// generic worker
//--------------------------------------------------------------------------
if (!done)
{
GB_BURBLE_MATRIX (C, "(generic emult_02: %s) ", op->name) ;
int ewise_method = flipxy ? GB_EMULT_METHOD3 : GB_EMULT_METHOD2 ;
GB_ewise_generic (C, op, NULL, 0, 0,
NULL, NULL, NULL, C_sparsity, ewise_method, Cp_kfirst,
NULL, 0, 0, A_ek_slicing, A_ntasks, A_nthreads, NULL, 0, 0,
M, Mask_struct, Mask_comp, A, B, Context) ;
}
//--------------------------------------------------------------------------
// remove empty vectors from C, if hypersparse
//--------------------------------------------------------------------------
GB_OK (GB_hypermatrix_prune (C, Context)) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
ASSERT_MATRIX_OK (C, "C output for emult_02", GB0) ;
return (GrB_SUCCESS) ;
}
|
GB_unop__abs_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_fp32_fp32
// op(A') function: GB_unop_tran__abs_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = fabsf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = fabsf ((float) Ax [p]) for every entry p present in A.
// Cx and Ax may be aliased since the operation is elementwise in place.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__abs_fp32_fp32
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = fabsf (Ax [p]) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present: apply the operator
                Cx [p] = fabsf (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = fabsf (cast (A')): transpose A while applying the ABS operator.
// The actual loop bodies come from the shared template
// GB_unop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_unop_tran__abs_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,   // per-thread workspaces for the transpose
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across threads
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hnswalg.h | #pragma once
#include "hnswlib.h"
#include "visited_list_pool.h"
#include <random>
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_set>
#include <unordered_map>
#include <array>
#include <map>
#include <cmath>
#include <queue>
// Serialize a plain-old-data value to a binary output stream, byte for byte.
template<typename T>
static void writeBinaryPOD(std::ostream &out, const T &podRef) {
    out.write(reinterpret_cast<const char *>(&podRef), sizeof(podRef));
}
// Deserialize a plain-old-data value from a binary input stream, byte for byte.
template<typename T>
static void readBinaryPOD(std::istream &in, T &podRef) {
    in.read(reinterpret_cast<char *>(&podRef), sizeof(podRef));
}
#define DEBUG_LIB 1
namespace hnswlib {
typedef unsigned int tableint;
typedef unsigned char linklistsizeint;
// Hierarchical Navigable Small World (HNSW) graph index for approximate
// nearest-neighbour search.  This variant keeps only the level-0 link lists
// plus the raw vector data in one flat malloc'd arena (data_level0_memory_);
// the per-element layout is:
//   [linklistsizeint link-count][maxM_ x tableint links][vector payload]
// dist_t is the distance value type; vtype the stored coordinate type.
template<typename dist_t, typename vtype>
class HierarchicalNSW
{
public:
// Stub constructor: leaves every member uninitialized; only useful as a
// placeholder before one of the real constructors' logic is applied.
HierarchicalNSW(SpaceInterface<dist_t> *s) {}
// Load a previously saved index from its info/data/edge files.
// nmslib flag is accepted for interface compatibility but unused here.
HierarchicalNSW(SpaceInterface<dist_t> *s, const string &infoLocation, const string &dataLocation,
const string &edgeLocation, bool nmslib = false)
{
LoadInfo(infoLocation, s);
LoadData(dataLocation);
LoadEdges(edgeLocation);
}
// Build an empty index with capacity maxelements.
// M: number of links made per insertion; maxM: per-element link capacity.
HierarchicalNSW(SpaceInterface<dist_t> *s, size_t maxelements, size_t M, size_t maxM, size_t efConstruction = 200)
{
space = s;
data_size_ = s->get_data_size();
efConstruction_ = efConstruction;
maxelements_ = maxelements;
M_ = M;
maxM_ = maxM;
size_links_level0 = maxM * sizeof(tableint) + sizeof(linklistsizeint);
size_data_per_element = size_links_level0 + data_size_;
offsetData = size_links_level0;
// NOTE(review): reads data_level0_memory_ BEFORE it is assigned below —
// uninitialized-member read (UB); this debug print should be removed.
std::cout << (data_level0_memory_ ? 1 : 0) << std::endl;
// NOTE(review): malloc result is printed but never checked for NULL.
data_level0_memory_ = (char *) malloc(maxelements_ * size_data_per_element);
std::cout << (data_level0_memory_ ? 1 : 0) << std::endl;
cout << "Size Mb: " << (maxelements_ * size_data_per_element) / (1000 * 1000) << "\n";
cur_element_count = 0;
visitedlistpool = new VisitedListPool(1, maxelements_);
//initializations for special treatment of the first node
enterpoint_node = -1;
maxlevel_ = -1;
elementLevels = vector<char>(maxelements_);
for (size_t i = 0; i < maxelements_; ++i)
elementLevels[i] = 0;
}
~HierarchicalNSW()
{
free(data_level0_memory_);
delete visitedlistpool;
}
// Fields
SpaceInterface<dist_t> *space;      // distance function + payload geometry
size_t maxelements_;                // capacity of the index
size_t cur_element_count;           // number of inserted elements
size_t efConstruction_;             // beam width used while building
int maxlevel_;                      // current top level of the graph
VisitedListPool *visitedlistpool;   // reusable visited-marker arrays
mutex cur_element_count_guard_;     // protects cur_element_count
mutex MaxLevelGuard_;               // NOTE(review): declared but unused; 'global' is used instead
tableint enterpoint_node;           // entry point id (-1 wrapped to max unsigned when empty)
size_t dist_calc;                   // distance-computation counter (not thread-safe)
char *data_level0_memory_;          // flat arena: links + vectors per element
vector<char> elementLevels;         // level assigned to each element
size_t data_size_;                  // bytes of one vector payload
size_t offsetData;                  // byte offset of payload within an element slot
size_t size_data_per_element;       // total bytes of one element slot
size_t M_;                          // links created per insertion
size_t maxM_;                       // link-list capacity per element
size_t size_links_level0;           // bytes of the link section of a slot
// Pointer to the vector payload of a stored element.
inline char *getDataByInternalId(tableint internal_id) const
{
return (data_level0_memory_ + internal_id * size_data_per_element + offsetData);
}
// Pointer to the link-count header (followed by the link array) of an element.
inline linklistsizeint *get_linklist0(tableint internal_id) const
{
return (linklistsizeint *) (data_level0_memory_ + internal_id * size_data_per_element);
};
// Best-first beam search over the level-0 graph, used at construction time.
// Returns up to ef candidates as a max-heap of (distance, id).
// NOTE(review): 'level' is accepted but ignored — only level-0 links exist;
// topResults.size() < ef compares unsigned to int (ef assumed non-negative).
std::priority_queue<std::pair<dist_t, tableint >> searchBaseLayer(tableint ep, void *datapoint, int level, int ef)
{
VisitedList *vl = visitedlistpool->getFreeVisitedList();
vl_type *massVisited = vl->mass;
vl_type currentV = vl->curV;
std::priority_queue<std::pair<dist_t, tableint >> topResults;
std::priority_queue<std::pair<dist_t, tableint >> candidateSet;
dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(ep));
topResults.emplace(dist, ep);
// candidates are negated so the max-heap pops the closest first
candidateSet.emplace(-dist, ep);
massVisited[ep] = currentV;
dist_t lowerBound = dist;
while (!candidateSet.empty()) {
std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
// closest remaining candidate is already worse than the worst result: stop
if ((-curr_el_pair.first) > lowerBound) {
break;
}
candidateSet.pop();
tableint curNodeNum = curr_el_pair.second;
linklistsizeint *ll_cur = get_linklist0(curNodeNum);
linklistsizeint size = *ll_cur;
tableint *data = (tableint *) (ll_cur + 1);
_mm_prefetch(getDataByInternalId(*data), _MM_HINT_T0);
for (linklistsizeint j = 0; j < size; ++j) {
tableint tnum = *(data + j);
// prefetch the next neighbour's payload while processing this one
_mm_prefetch(getDataByInternalId(*(data + j + 1)), _MM_HINT_T0);
if (!(massVisited[tnum] == currentV)) {
massVisited[tnum] = currentV;
dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(tnum));
if (topResults.top().first > dist || topResults.size() < ef) {
candidateSet.emplace(-dist, tnum);
_mm_prefetch(getDataByInternalId(candidateSet.top().second), _MM_HINT_T0);
topResults.emplace(dist, tnum);
if (topResults.size() > ef) {
topResults.pop();
}
lowerBound = topResults.top().first;
}
}
}
}
visitedlistpool->releaseVisitedList(vl);
return topResults;
}
// Orders (distance, id) pairs so the priority queue pops the farthest first.
struct CompareByFirst {
constexpr bool operator()(pair<dist_t, tableint> const &a,
pair<dist_t, tableint> const &b) const noexcept {
return a.first < b.first;
}
};
// Single-threaded query-time beam search over the level-0 graph.
// Also accumulates the dist_calc and hops0 statistics (not thread-safe).
std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst>
searchBaseLayerST(tableint ep, void *datapoint, size_t ef)
{
VisitedList *vl = visitedlistpool->getFreeVisitedList();
vl_type *massVisited = vl->mass;
vl_type currentV = vl->curV;
std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst> topResults;
std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst> candidateSet;
dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(ep));
dist_calc++;
topResults.emplace(dist, ep);
candidateSet.emplace(-dist, ep);
massVisited[ep] = currentV;
dist_t lowerBound = dist;
while (!candidateSet.empty()) {
// hop counter scaled for averaging over 100000 queries (statistic only)
hops0 += 1.0 / 100000;
std::pair<dist_t, tableint> curr_el_pair = candidateSet.top();
if (-curr_el_pair.first > lowerBound)
break;
candidateSet.pop();
tableint curNodeNum = curr_el_pair.second;
linklistsizeint *ll_cur = get_linklist0(curNodeNum);
linklistsizeint size = *ll_cur;
tableint *data = (tableint *)(ll_cur + 1);
_mm_prefetch((char *) (massVisited + *data), _MM_HINT_T0);
_mm_prefetch((char *) (massVisited + *data + 64), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*data), _MM_HINT_T0);
for (linklistsizeint j = 0; j < size; ++j) {
int tnum = *(data + j);
_mm_prefetch((char *) (massVisited + *(data + j + 1)), _MM_HINT_T0);
_mm_prefetch(getDataByInternalId(*(data + j + 1)), _MM_HINT_T0);
if (!(massVisited[tnum] == currentV)) {
massVisited[tnum] = currentV;
dist_t dist = space->fstdistfunc(datapoint, getDataByInternalId(tnum));
dist_calc++;
if (topResults.top().first > dist || topResults.size() < ef) {
candidateSet.emplace(-dist, tnum);
_mm_prefetch(get_linklist0(candidateSet.top().second), _MM_HINT_T0);
topResults.emplace(dist, tnum);
if (topResults.size() > ef)
topResults.pop();
lowerBound = topResults.top().first;
}
}
}
}
visitedlistpool->releaseVisitedList(vl);
return topResults;
}
// HNSW neighbour-selection heuristic: keep a candidate only if it is closer
// to the query than to every already-kept neighbour (diversifies links).
// NOTE(review): size()/NN comparisons mix unsigned and int.
void getNeighborsByHeuristic(std::priority_queue<std::pair<dist_t, tableint>> &topResults, const int NN) {
if (topResults.size() < NN)
return;
std::priority_queue<std::pair<dist_t, tableint>> resultSet;
std::priority_queue<std::pair<dist_t, tableint>> templist;
vector<std::pair<dist_t, tableint>> returnlist;
// reverse the heap so candidates come out closest-first
while (topResults.size() > 0) {
resultSet.emplace(-topResults.top().first, topResults.top().second);
topResults.pop();
}
while (resultSet.size()) {
if (returnlist.size() >= NN)
break;
std::pair<dist_t, tableint> curen = resultSet.top();
dist_t dist_to_query = -curen.first;
resultSet.pop();
bool good = true;
for (std::pair<dist_t, tableint> curen2 : returnlist) {
dist_t curdist = space->fstdistfunc(getDataByInternalId(curen2.second),
getDataByInternalId(curen.second));
if (curdist < dist_to_query) {
good = false;
break;
}
}
if (good) returnlist.push_back(curen);
}
// re-negate distances while refilling the caller's queue
for (std::pair<dist_t, tableint> curen2 : returnlist)
topResults.emplace(-curen2.first, curen2.second);
}
// Wire a newly inserted element cur_c to the selected neighbours, and add
// back-links from each neighbour (pruning with the merge heuristic when a
// neighbour's link list is full).  NOTE(review): neighbour link lists are
// updated without any locking — not safe for concurrent insertion.
void mutuallyConnectNewElement(void *datapoint, tableint cur_c,
std::priority_queue<std::pair<dist_t, tableint>> topResults, int level)
{
size_t curMmax = maxM_;
size_t curM = M_;
getNeighborsByHeuristic(topResults, curM);
while (topResults.size() > curM) {
throw exception();
}
vector<tableint> rez;
rez.reserve(curM);
while (topResults.size() > 0) {
rez.push_back(topResults.top().second);
topResults.pop();
}
{
// write the forward links of the new element (slot must be zeroed)
linklistsizeint *ll_cur = get_linklist0(cur_c);
if (*ll_cur) {
cout << *ll_cur << "\n";
cout << (int) elementLevels[cur_c] << "\n";
cout << level << "\n";
throw runtime_error("Should be blank");
}
*ll_cur = rez.size();
tableint *data = (tableint *)(ll_cur + 1);
for (int idx = 0; idx < rez.size(); idx++) {
if (data[idx])
throw runtime_error("Should be blank");
if (level > elementLevels[rez[idx]])
throw runtime_error("Bad level");
data[idx] = rez[idx];
}
}
// add the reverse link from each selected neighbour back to cur_c
for (int idx = 0; idx < rez.size(); idx++) {
if (rez[idx] == cur_c)
throw runtime_error("Connection to the same element");
size_t rezMmax = maxM_;
linklistsizeint *ll_other = get_linklist0(rez[idx]);
if (level > elementLevels[rez[idx]])
throw runtime_error("Bad level");
linklistsizeint sz_link_list_other = *ll_other;
// NOTE(review): sz_link_list_other is unsigned, so '< 0' is always false
if (sz_link_list_other > rezMmax || sz_link_list_other < 0)
throw runtime_error("Bad sz_link_list_other");
if (sz_link_list_other < rezMmax) {
// room left: append cur_c
tableint *data = (tableint *) (ll_other + 1);
data[sz_link_list_other] = cur_c;
*ll_other = sz_link_list_other + 1;
} else {
// finding the "weakest" element to replace it with the new one
tableint *data = (tableint *) (ll_other + 1);
dist_t d_max = space->fstdistfunc(getDataByInternalId(cur_c), getDataByInternalId(rez[idx]));
// Heuristic:
std::priority_queue<std::pair<dist_t, tableint>> candidates;
candidates.emplace(d_max, cur_c);
for (int j = 0; j < sz_link_list_other; j++)
candidates.emplace(space->fstdistfunc(getDataByInternalId(data[j]), getDataByInternalId(rez[idx])), data[j]);
getNeighborsByHeuristicMerge(candidates, rezMmax);
int indx = 0;
while (candidates.size() > 0) {
data[indx] = candidates.top().second;
candidates.pop();
indx++;
}
*ll_other = indx;
}
}
}
mutex global;       // guards enterpoint/maxlevel updates during insertion
size_t ef_;         // query-time beam width used by searchKnn
// extra statistics / search state
float nev9zka = 0.0;    // statistic accumulator (semantics unclear — TODO confirm)
tableint enterpoint0;   // entry point reached at level 0 by the last searchKnn
float hops0 = 0.0;      // averaged hop counter for level-0 searches
// Insert one element with the given label.
// NOTE(review): 'label' is accepted but never stored — TODO confirm intended.
void addPoint(void *datapoint, labeltype label)
{
tableint cur_c = 0;
{
// reserve the next slot under the counter lock
unique_lock <mutex> lock(cur_element_count_guard_);
if (cur_element_count >= maxelements_) {
cout << "The number of elements exceeds the specified limit\n";
throw runtime_error("The number of elements exceeds the specified limit");
};
cur_c = cur_element_count;
cur_element_count++;
}
int curlevel = elementLevels[cur_c];
// hold the global lock only while a new top level may be created
unique_lock <mutex> templock(global);
int maxlevelcopy = maxlevel_;
if (curlevel <= maxlevelcopy)
templock.unlock();
memset((char *) get_linklist0(cur_c), 0, size_data_per_element);
memcpy(getDataByInternalId(cur_c), datapoint, data_size_);
tableint currObj = enterpoint_node;
// NOTE(review): tableint is unsigned; 'currObj != -1' relies on wraparound
if (currObj != -1) {
if (curlevel < maxlevelcopy) {
// greedy descent through the upper levels to a good entry point
dist_t curdist = space->fstdistfunc(datapoint, getDataByInternalId(currObj));
for (int level = maxlevelcopy; level > curlevel; level--) {
bool changed = true;
while (changed) {
changed = false;
linklistsizeint *data = get_linklist0(currObj);
linklistsizeint size = *data;
tableint *datal = (tableint *) (data + 1);
for (linklistsizeint i = 0; i < size; i++) {
tableint cand = datal[i];
// NOTE(review): 'cand < 0' is always false (unsigned)
if (cand < 0 || cand > maxelements_)
throw runtime_error("cand error");
dist_t d = space->fstdistfunc(datapoint, getDataByInternalId(cand));
if (d < curdist) {
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
}
// connect the new element on every level it participates in
for (int level = 0; level <= min(curlevel, maxlevelcopy); level++) {
if (level > maxlevelcopy || level < 0)
throw runtime_error("Level error");
std::priority_queue<std::pair<dist_t, tableint>> topResults = searchBaseLayer(currObj, datapoint,
level, efConstruction_);
mutuallyConnectNewElement(datapoint, cur_c, topResults, level);
}
} else {
// Do nothing for the first element
enterpoint_node = 0;
maxlevel_ = curlevel;
}
//Releasing lock for the maximum level
if (curlevel > maxlevelcopy) {
enterpoint_node = cur_c;
maxlevel_ = curlevel;
}
};
// k-nearest-neighbour query: greedy descent from the top level, then a
// beam search (width ef_) at level 0; returns a max-heap of (dist, label).
std::priority_queue<std::pair<dist_t, labeltype >> searchKnn(void *query_data, int k, int q_idx = -1)
{
tableint currObj = enterpoint_node;
dist_t curdist = space->fstdistfunc(query_data, getDataByInternalId(enterpoint_node));
dist_calc++;
for (int level = maxlevel_; level > 0; level--) {
bool changed = true;
while (changed) {
changed = false;
linklistsizeint *data = get_linklist0(currObj);
linklistsizeint size = *data;
tableint *datal = (tableint *) (data + 1);
for (linklistsizeint i = 0; i < size; i++) {
tableint cand = datal[i];
// NOTE(review): 'cand < 0' is always false (unsigned)
if (cand < 0 || cand > maxelements_)
throw runtime_error("cand error");
dist_t d = space->fstdistfunc(query_data, getDataByInternalId(cand));
dist_calc++;
if (d < curdist) {
curdist = d;
currObj = cand;
changed = true;
}
}
}
}
enterpoint0 = currObj;
std::priority_queue<std::pair<dist_t, tableint>, vector<pair<dist_t, tableint>>, CompareByFirst> tmpTopResults = searchBaseLayerST(
currObj, query_data, ef_);
// Remove clusters as answers
std::priority_queue<std::pair<dist_t, tableint >> topResults;
while (tmpTopResults.size() > 0) {
std::pair<dist_t, tableint> rez = tmpTopResults.top();
topResults.push(rez);
tmpTopResults.pop();
}
// trim to the k best (the heap pops the farthest first)
while (topResults.size() > k)
topResults.pop();
return topResults;
};
// Print a histogram of link-list sizes over all elements.
// NOTE(review): an element with 0 links would index numLinks[-1] — assumes
// every element has at least one link; TODO confirm.
void printListsize()
{
float av_M = 0;
int numLinks[32];
for (int i = 0; i < 32; i++)
numLinks[i] = 0;
for (int i = 0; i < maxelements_; i++){
linklistsizeint *ll_cur = get_linklist0(i);
numLinks[*ll_cur - 1]++;
av_M += (1.0 * *ll_cur) / maxelements_;
}
std::cout << "Links distribution" << std::endl;
for (int i = 0; i < 32; i++){
cout << " Number of elements with " << i+1 << " links: " << numLinks[i] << endl;
}
}
// Serialize index metadata (sizes, offsets, entry point) to a binary file.
void SaveInfo(const string &location) {
cout << "Saving info to " << location << endl;
std::ofstream output(location, std::ios::binary);
streampos position;
writeBinaryPOD(output, maxelements_);
writeBinaryPOD(output, enterpoint_node);
writeBinaryPOD(output, data_size_);
writeBinaryPOD(output, offsetData);
writeBinaryPOD(output, size_data_per_element);
writeBinaryPOD(output, M_);
writeBinaryPOD(output, maxM_);
writeBinaryPOD(output, size_links_level0);
output.close();
}
// Serialize all level-0 link lists as [int size][tableint links...] records.
// NOTE(review): fopen result unchecked and fout is never fclose'd — leak.
void SaveEdges(const string &location)
{
cout << "Saving edges to " << location << endl;
FILE *fout = fopen(location.c_str(), "wb");
for (tableint i = 0; i < maxelements_; i++) {
linklistsizeint *ll_cur = get_linklist0(i);
int size = *ll_cur;
fwrite((int *)&size, sizeof(int), 1, fout);
tableint *data = (tableint *)(ll_cur + 1);
fwrite(data, sizeof(tableint), *ll_cur, fout);
}
}
// Read index metadata (must mirror SaveInfo) and allocate the data arena.
// NOTE(review): malloc result unchecked.
void LoadInfo(const string &location, SpaceInterface<dist_t> *s)
{
cout << "Loading info from " << location << endl;
std::ifstream input(location, std::ios::binary);
streampos position;
space = s;
data_size_ = s->get_data_size();
readBinaryPOD(input, maxelements_);
readBinaryPOD(input, enterpoint_node);
readBinaryPOD(input, data_size_);
readBinaryPOD(input, offsetData);
readBinaryPOD(input, size_data_per_element);
readBinaryPOD(input, M_);
readBinaryPOD(input, maxM_);
readBinaryPOD(input, size_links_level0);
data_level0_memory_ = (char *) malloc(maxelements_ * size_data_per_element);
efConstruction_ = 0;
cur_element_count = maxelements_;
visitedlistpool = new VisitedListPool(1, maxelements_);
elementLevels = vector<char>(maxelements_);
for (size_t i = 0; i < maxelements_; ++i)
elementLevels[i] = 0;
maxlevel_ = 0;
cout << "Predicted size=" << maxelements_ * size_data_per_element / (1000 * 1000) << "\n";
input.close();
}
// Read all vector payloads ([int dim][vtype coords...] per element) into the
// arena, zeroing each element's link section.  NOTE(review): fopen/fread
// results unchecked; fin never fclose'd; message typo "Wront" is a runtime
// string and is left as-is here.
void LoadData(const string &location)
{
cout << "Loading data from " << location << endl;
FILE *fin = fopen(location.c_str(), "rb");
int dim;
const int D = space->get_data_dim();
vtype mass[D];
for (tableint i = 0; i < maxelements_; i++) {
fread((int *) &dim, sizeof(int), 1, fin);
if (dim != D)
cerr << "Wront data dim" << endl;
fread(mass, sizeof(vtype), dim, fin);
memset((char *) get_linklist0(i), 0, size_data_per_element);
memcpy(getDataByInternalId(i), mass, data_size_);
}
}
// Read all level-0 link lists (must mirror SaveEdges).
// NOTE(review): fopen/fread results unchecked; fin never fclose'd.
void LoadEdges(const string &location)
{
cout << "Loading edges from " << location << endl;
FILE *fin = fopen(location.c_str(), "rb");
int size;
for (tableint i = 0; i < maxelements_; i++) {
fread((int *)&size, sizeof(int), 1, fin);
linklistsizeint *ll_cur = get_linklist0(i);
*ll_cur = size;
tableint *data = (tableint *)(ll_cur + 1);
fread((tableint *)data, sizeof(tableint), size, fin);
}
}
// Variant of the selection heuristic used when merging/pruning full link
// lists: rejected candidates are kept in templist and used to top up the
// result to NN links if the diverse set comes up short.
void getNeighborsByHeuristicMerge(std::priority_queue<std::pair<dist_t, tableint>> &topResults, const int NN) {
if (topResults.size() < NN)
return;
std::priority_queue<std::pair<dist_t, tableint>> resultSet;
std::priority_queue<std::pair<dist_t, tableint>> templist;
std::vector<std::pair<dist_t, tableint>> returnlist;
while (topResults.size() > 0) {
resultSet.emplace(-topResults.top().first, topResults.top().second);
topResults.pop();
}
while (resultSet.size()) {
if (returnlist.size() >= NN)
break;
std::pair<dist_t, tableint> curen = resultSet.top();
dist_t dist_to_query = -curen.first;
resultSet.pop();
bool good = true;
for (std::pair<dist_t, tableint> curen2 : returnlist) {
dist_t curdist = space->fstdistfunc(getDataByInternalId(curen2.second), getDataByInternalId(curen.second));
if (curdist < dist_to_query) {
good = false;
break;
}
}
if (good)
returnlist.push_back(curen);
else
templist.emplace(curen);
}
// backfill with the closest rejected candidates if under NN
while (returnlist.size() < NN && templist.size() > 0) {
returnlist.push_back(templist.top());
templist.pop();
}
for (std::pair<dist_t, tableint> curen2 : returnlist)
topResults.emplace(-curen2.first, curen2.second);
}
// Merge the link lists of another index storing the SAME points in reverse
// order (element i here corresponds to maxelements_-1-i there); unions the
// two neighbour sets and re-prunes with the merge heuristic when over maxM_.
void merge(const HierarchicalNSW<dist_t, vtype> *hnsw)
{
int counter = 0;
//#pragma omp parallel for
for (int i = 0; i < maxelements_; i++){
float *data = (float *) getDataByInternalId(i);
linklistsizeint *ll1 = get_linklist0(i);
linklistsizeint *ll2 = hnsw->get_linklist0(maxelements_- 1 - i);
// sanity check: both indexes must store the same point at mirrored ids
float identity = space->fstdistfunc((void *)data, (void *)hnsw->getDataByInternalId(maxelements_- 1 - i));
if (identity > 0.0000001){
std::cout << "Merging different points\n";
exit(1);
}
size_t size1 = *ll1;
size_t size2 = *ll2;
labeltype *links1 = (labeltype *)(ll1 + 1);
labeltype *links2 = (labeltype *)(ll2 + 1);
std::unordered_set<labeltype> links;
for (labeltype link = 0; link < size1; link++)
links.insert(links1[link]);
for (labeltype link = 0; link < size2; link++)
links.insert(maxelements_- 1 - links2[link]);
if (links.size() <= maxM_){
// union fits: write it back directly
int indx = 0;
for (labeltype link : links)
links1[indx++] = link;
*ll1 = indx;
} else {
// union too large: keep the heuristically best maxM_ links
std::priority_queue<std::pair<dist_t, tableint>> topResults;
for (labeltype link : links){
float *point = (float *) getDataByInternalId(link);
dist_t dist = space->fstdistfunc((void *)data, (void *)point);
topResults.emplace(std::make_pair(dist, link));
}
getNeighborsByHeuristicMerge(topResults, maxM_);
int indx = 0;
while (topResults.size() > 0) {
links1[indx++] = topResults.top().second;
topResults.pop();
}
*ll1 = indx;
}
if (*ll1 < maxM_)
counter++;
}
std::cout << counter << std::endl;
}
};
}
|
profiles.c | #include "allvars.h"
#include "profiles.h"
// Compute radial density and velocity profiles for every true void
// (Void[i].ToF) using a neighbour-grid search over the tracer catalogue,
// in parallel over voids.  Side effects: sets Void[..].Dtype to the maximum
// cumulative overdensity in the shell [2R, 3R], optionally writes one
// profile file per void, and appends a timing entry to StepName/StepTime.
void ComputeProfiles()
{
int i,j,k,ic,jc,kc,l,ii,jj,kk,next,ibin,in,m,NumGrid;
double xc[3],xt[3],dx[3],vt[3],dist,Rho,GridSize[3];
double dR,DeltaDiff,DeltaCum,MinDist,MaxDist,GAP;
double CGal[NumProfileBins],Suma[NumProfileBins],CVel[NumProfileBins];
double VRad,rm,ri,rs,Vol,DeltaMax,Radius;
vector <int> Indx;
char OutFile[MAXCHAR];
int NumNeigh;
struct neighbour Neigh;
struct grid *GridList;
FILE *fd;
clock_t t;
fprintf(logfile,"\n COMPUTING VOID PROFILES \n");
t = clock();
// grid resolution ~ (NumTrac/10)^(1/3), at least 100 cells per side
NumGrid = (int)round(cbrt((double)NumTrac/10.0));
if (NumGrid < 100) NumGrid = 100;
// NOTE(review): malloc result unchecked, and GridList is never freed in
// this function — presumably freed elsewhere or leaked; TODO confirm.
GridList = (struct grid *) malloc(NumGrid*NumGrid*NumGrid*sizeof(struct grid));
// presumably also fills GridSize[3] with the cell dimensions — TODO confirm
BuildGridList(GridList,NumGrid,GridSize,0,false);
// Only for true voids
for (i=0; i<NumVoid; i++)
if (Void[i].ToF)
Indx.push_back(i);
// logarithmic bin width in units of the void radius
dR = (log10(MaxProfileDist)-log10(MinProfileDist))/(double)NumProfileBins;
// Select neighbouring grid cells (search range from the largest void)
MaxDist = 0.0;
for (i=0; i<NumVoid; i++) {
if (!Void[i].ToF) continue;
if (Void[i].Rad > MaxDist) MaxDist = Void[i].Rad;
}
MaxDist *= MaxProfileDist;
// GAP = diagonal of the largest grid cell, used as a safety margin
GAP = 0.0;
for (k=0; k<3; k++)
if (GridSize[k] > GAP)
GAP = GridSize[k];
GAP *= sqrt(3.0);
MinDist = 0.0;
MaxDist = MaxDist + GAP;
SearchNeighbours(&Neigh,&NumNeigh,GridSize,MinDist,MaxDist);
fprintf(logfile," | MinDist - MaxDist = %5.3f - %5.3f [Mpc/h], %d grids \n",MinDist,MaxDist,NumNeigh);
fflush(logfile);
#pragma omp parallel for default(none) schedule(dynamic) \
shared(NumVoid,Void,Tracer,NumNeigh,Neigh,dR,NumGrid,GridSize,GridList, \
Indx,MeanNumTrac,LBox,NumProfileBins,MinProfileDist,MaxProfileDist,\
WriteProfiles,PathProfiles,GAP) \
private(i,m,k,ii,jj,kk,l,Radius,ic,jc,kc,CVel,CGal,Suma,xc,xt,dx,vt,next, \
dist,VRad,ibin,DeltaMax,ri,rm,rs,Rho,Vol,DeltaDiff,DeltaCum,fd,in,\
OutFile)
for (i=0; i<Indx.size(); i++) {
// reset per-void accumulators
for (k=0; k<NumProfileBins; k++) {
CVel[k] = 0.0;
CGal[k] = 0.0;
Suma[k] = 0.0;
}
Radius = Void[Indx[i]].Rad;
for (k=0; k<3; k++)
xc[k] = Void[Indx[i]].Pos[k];
Void[Indx[i]].Dtype = 0.0;
// grid cell containing the void centre
ic = (int)(xc[0]/GridSize[0]);
jc = (int)(xc[1]/GridSize[1]);
kc = (int)(xc[2]/GridSize[2]);
for (in=0; in<NumNeigh; in++) {
ii = Neigh.i[in];
jj = Neigh.j[in];
kk = Neigh.k[in];
// distance from the centre cell to this neighbour cell offset
dist = (double)(ii*ii)*(GridSize[0]*GridSize[0])
+ (double)(jj*jj)*(GridSize[1]*GridSize[1])
+ (double)(kk*kk)*(GridSize[2]*GridSize[2]);
dist = sqrt(dist);
if (dist > MaxProfileDist*Radius+GAP) continue;
// wrap to the periodic box
ii = PeriodicGrid(ii + ic, NumGrid);
jj = PeriodicGrid(jj + jc, NumGrid);
kk = PeriodicGrid(kk + kc, NumGrid);
l = Index1D(ii,jj,kk,NumGrid);
if (GridList[l].NumMem == 0) continue;
// accumulate tracers of this cell into the radial bins
for (m=0; m<GridList[l].NumMem; m++) {
next = GridList[l].Member[m];
for (k=0; k<3; k++) {
xt[k] = (double)Tracer[next].Pos[k];
vt[k] = (double)(Tracer[next].Vel[k] - Void[Indx[i]].Vel[k]);
dx[k] = PeriodicDeltaPos(xt[k] - xc[k],LBox[k])/Radius;
}
dist = sqrt(dx[0]*dx[0] + dx[1]*dx[1] + dx[2]*dx[2]);
if (dist > MinProfileDist && dist < MaxProfileDist) {
ibin = (int)((log10(dist)-log10(MinProfileDist))/dR);
// radial component of the relative velocity
VRad = vt[0]*dx[0] + vt[1]*dx[1] + vt[2]*dx[2];
VRad /= dist;
CVel[ibin] += VRad;
CGal[ibin] += 1.0;
}
}
}
// mean radial velocity per bin; drop bins with fewer than 3 tracers
for (k=0; k<NumProfileBins; k++) {
if (CGal[k] < 3.0) {
CGal[k] = 0.0;
CVel[k] = 0.0;
} else {
CVel[k] /= CGal[k];
}
}
if (WriteProfiles == 1) {
// NOTE(review): file is named with the compacted loop index i, not the
// original void id Indx[i] — confirm this is the intended naming.
sprintf(OutFile,"%s/void_%d.dat",PathProfiles,i);
fd = SafeOpen(OutFile,"w");
}
DeltaMax = -1.0;
for (k=0; k<NumProfileBins; k++) {
// cumulative tracer count out to bin k
for (kk=0; kk<=k; kk++)
Suma[k] += CGal[kk];
// bin edges/centre in log10(r/Radius), then converted to physical r
ri = (double)(k )*dR + log10(MinProfileDist);
rm = (double)(k+0.5)*dR + log10(MinProfileDist);
rs = (double)(k+1.0)*dR + log10(MinProfileDist);
ri = pow(10.0,ri)*Radius;
rm = pow(10.0,rm)*Radius;
rs = pow(10.0,rs)*Radius;
// differential overdensity in the shell [ri, rs]
Vol = (4.0/3.0)*PI*(pow(rs,3) - pow(ri,3));
Rho = CGal[k]/Vol;
DeltaDiff = Rho/MeanNumTrac - 1.0;
// cumulative overdensity within rs
Vol = (4.0/3.0)*PI*pow(rs,3);
Rho = Suma[k]/Vol;
DeltaCum = Rho/MeanNumTrac - 1.0;
if (WriteProfiles == 1)
fprintf(fd,"%12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f \n",
ri,rm,rs,CVel[k],DeltaDiff,DeltaCum,Radius);
// Dtype: max cumulative overdensity in the shell [2R, 3R]
if (rs < 2.0*Radius || rs > 3.0*Radius) continue;
if (DeltaCum > DeltaMax) DeltaMax = DeltaCum;
}
if (WriteProfiles == 1)
fclose(fd);
Void[Indx[i]].Dtype = DeltaMax;
}
Indx.clear();
FreeNeighbours(&Neigh);
StepName.push_back("Computing profiles");
StepTime.push_back(Time(t,OMPcores));
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,ExceptionInfo *exception)
{
  FrameInfo
    frame_geometry;

  Image
    *clone,
    *framed;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    Express the border as a degenerate frame: the border width/height is
    added on every side and both bevels are zero, so FrameImage() paints a
    flat, solid band around the image.
  */
  frame_geometry.width=image->columns+(border_info->width << 1);
  frame_geometry.height=image->rows+(border_info->height << 1);
  frame_geometry.x=(ssize_t) border_info->width;
  frame_geometry.y=(ssize_t) border_info->height;
  frame_geometry.inner_bevel=0;
  frame_geometry.outer_bevel=0;
  clone=CloneImage(image,0,0,MagickTrue,exception);
  if (clone == (Image *) NULL)
    return((Image *) NULL);
  /*
    FrameImage() paints with the matte color, so substitute the border color
    on the working clone, then restore the caller's matte color afterwards.
  */
  clone->matte_color=image->border_color;
  framed=FrameImage(clone,&frame_geometry,exception);
  clone=DestroyImage(clone);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* Palette for the simulated 3D frame: matte is the base frame color,
     accentuate/highlight are lighter blends toward white, and shadow/trough
     are darker scalings of matte. */
  MagickPixelPacket
    accentuate,
    border,
    highlight,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /*
    x/y: room left for the image interior once the left/top offset and both
    bevels are subtracted from the requested frame geometry; the frame must
    be at least large enough to hold the source image.
  */
  x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
  y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&frame_image->exception);
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  /* A non-gray border on a grayscale image forces the result into sRGB. */
  if ((IsPixelGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace);
  /* A translucent border color requires an alpha channel on the result. */
  if ((frame_image->border_color.opacity != OpaqueOpacity) &&
      (frame_image->matte == MagickFalse))
    (void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color.
  */
  GetMagickPixelPacket(frame_image,&matte);
  matte.colorspace=sRGBColorspace;
  SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
    &matte);
  GetMagickPixelPacket(frame_image,&border);
  border.colorspace=sRGBColorspace;
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &border);
  /* accentuate/highlight: linear blend of matte toward QuantumRange,
     weighted by the *Modulate constants. */
  GetMagickPixelPacket(frame_image,&accentuate);
  accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&highlight);
  highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.opacity=matte.opacity;
  /* shadow/trough: matte scaled darker by the *Modulate constants. */
  GetMagickPixelPacket(frame_image,&shadow);
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&trough);
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.opacity=matte.opacity;
  if (image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&matte);
      ConvertRGBToCMYK(&border);
      ConvertRGBToCMYK(&accentuate);
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&shadow);
      ConvertRGBToCMYK(&trough);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /*
    Height of the top band: outer bevel + top margin + inner bevel.
    NOTE(review): frame_info->y-bevel_width mixes signed and unsigned
    operands; geometry was validated above, so the sum is assumed
    non-negative -- confirm for hand-built FrameInfo values.
  */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register IndexPacket
        *magick_restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw top of ornamental border.  q and frame_indexes stream
            through the band row by row; every pixel of the band is written
            exactly once.
          */
          /* Outer bevel rows: diagonal highlight wedge on the left,
             accentuate across the top, shadow wedge on the right. */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Top margin rows: highlight edge, matte fill, shadow edge. */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Inner bevel rows just above the image interior: the trough band
             narrows by one pixel per row (the -y term). */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.  One frame row per image row; rows are
    independent, hence the parallel for.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,frame_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict frame_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
    /* Left side: highlight edge, matte margin, shadow inner bevel. */
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    /*
      Set frame interior pixels.
    */
    {
      register const IndexPacket
        *indexes;

      register const PixelPacket
        *p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      /* Bulk-copy the source scanline into the frame interior.  NOTE
         (review): frame_indexes only advances in the CMYK branch; for other
         colorspaces the index channel appears unused downstream -- confirm
         against SetPixelPacket's behavior. */
      (void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (frame_image->colorspace == CMYKColorspace))
        {
          (void) CopyMagickMemory(frame_indexes,indexes,image->columns*
            sizeof(*indexes));
          frame_indexes+=image->columns;
        }
      q+=image->columns;
    }
    /* Right side: highlight inner bevel, matte margin, shadow edge. */
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is not atomic; the critical section serializes it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Height of the bottom band: inner bevel + bottom margin + outer bevel.
  */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register IndexPacket
        *magick_restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw bottom of ornamental border.  Mirror image of the top band:
            bevel wedges taper in the opposite direction (y counts down).
          */
          frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Bottom margin rows: highlight edge, matte fill, shadow edge. */
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Outer bevel rows at the very bottom: highlight wedge on the
             left, trough across, shadow wedge on the right. */
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the source image into the frame interior.  Since bevel_width
    equals outer_bevel+inner_bevel, these expressions reduce algebraically
    to frame_info->x and frame_info->y.
  */
  x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image->compose,image,x,y);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* Blend targets: edges are pulled toward foreground (light for raise) or
     background (dark for raise); swapped when raise is MagickFalse. */
  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /* The two bevels must fit inside the image in both dimensions. */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  The image is processed in three horizontal strips -- top
    bevel, middle, bottom bevel -- each as its own parallel loop.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Strip 1: top bevel rows.  Left diagonal is highlight, the wide middle
     is accentuate, right diagonal is shadow. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is not atomic; serialize the progress callback. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* Strip 2: middle rows.  Highlight the left bevel, leave the interior
     untouched (pointer advanced only), shadow the right bevel. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    /* Skip over the untouched interior pixels of the scanline. */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* Strip 3: bottom bevel rows.  The left highlight wedge shrinks as y
     approaches the bottom edge (width image->rows-y), the middle is trough,
     the right edge is shadow. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
omp-parallel-for.c | #include <omp.h>
#include <stdio.h>
#define LEN 20
int main(void)
{
  int num[LEN] = {0};

  /* Record, for each index, which OpenMP thread executed that iteration;
     the loop variable is implicitly private under the worksharing pragma. */
#pragma omp parallel for
  for (int i = 0; i < LEN; i++)
  {
    num[i] = omp_get_thread_num();
  }
  return 0;
}
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
  /* Unique view identifier, obtained from AcquireWandId() and released via
     RelinquishWandId() on destruction. */
  size_t
    id;

  /* View name (formatted as "WandView-<id>") and an owned description
     string (allocated with ConstantString). */
  char
    name[MaxTextExtent],
    *description;

  /* Region of the wand's image that this view iterates over. */
  RectangleInfo
    extent;

  /* Wand whose image pixels are exposed through this view. */
  MagickWand
    *wand;

  /* Cache view performing the actual pixel reads and writes. */
  CacheView
    *view;

  /* Worker-thread count; pixel_wands holds one row of PixelWand pointers
     per thread (indexed [thread][column]). */
  size_t
    number_threads;

  PixelWand
    ***pixel_wands;

  /* Per-view exception sink, owned by the view. */
  ExceptionInfo
    *exception;

  /* Event-logging flag and magic signature used by the assert checks. */
  MagickBooleanType
    debug;

  size_t
    signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  /*
    The clone shares the source view's wand (the wand is not owned by the
    view); previously this field was left NULL, which made any iterator on
    the clone dereference a null pointer.
  */
  clone_view->wand=wand_view->wand;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    Allocate the per-thread row table before cloning each row: the struct
    was zeroed above, so without this allocation the loop below wrote
    through a NULL pixel_wands pointer for any number_threads > 0.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    wand_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  /*
    Release each per-thread row of pixel wands, then the row table itself;
    always returns NULL so callers can clear their pointer directly.
  */
  register ssize_t
    n;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    if (pixel_wands[n] == (PixelWand **) NULL)
      continue;
    pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
WandExport WandView *DestroyWandView(WandView *wand_view)
{
  /*
    Release every resource owned by the view, invalidate its signature so a
    stale pointer trips the asserts, and return NULL.
  */
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  /*
    Free the description string (allocated with ConstantString in
    CloneWandView); previously it was never released, leaking on every
    destroy.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~WandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  DuplexTransferWandViewIterator() walks every scanline of the view, loads the
  source and duplex pixel wands, invokes the user transfer callback, and syncs
  the destination pixel wands back to the destination image.  The loop is
  parallelized with OpenMP when available; `status` collapses any per-row
  failure into the MagickFalse return value.
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /*
    The destination is written to, so it must own its pixels (DirectClass).
  */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict duplex_indexes,
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /*
      Load the source scanline into this thread's pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Load the duplex scanline.
    */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelBlack(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /*
      Pre-fill the destination wands with the current destination pixels.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Sync the (possibly updated) destination wands back to the pixel cache.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: the sync failed on the destination cache view, so inherit
          the exception from destination->view (previously source->view, which
          carries no error at this point).
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  GetWandViewException() reports the severity of any pending wand-view error
  through *severity and returns a newly allocated, localized message string
  ("reason (description)").  The caller owns the returned buffer.
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  /*
    Room for both the reason and the parenthesized description.
  */
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MaxTextExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewExtent() returns a copy of the view's extent rectangle.
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/*
  GetWandViewIterator(): read-only, scanline-parallel iteration.  For each row
  of the view it loads the thread-local pixel wands from the source image and
  invokes the user `get` callback; callback pixel updates are discarded.
  Returns MagickFalse if any row fails or the callback returns MagickFalse.
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
GetWandViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (get == (GetWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*indexes;
register const PixelPacket
*pixels;
register ssize_t
x;
/* A failure on any earlier row aborts the remaining rows. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
/* Copy this row into the thread's pixel wands. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across the OpenMP team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_GetWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewPixels() returns the pixel-wand row that belongs to the calling
  OpenMP thread (the wands a view-iterator callback reads and writes).
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    thread_id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[thread_id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  GetWandViewWand() returns the magick wand this view was created from.
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
/*
  IsWandView() verifies that the argument is a live wand view: non-NULL, with
  a valid signature, and a name carrying the WandViewId prefix.
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  if (LocaleNCompare(wand_view->name,WandViewId,strlen(WandViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
/*
  AcquirePixelsThreadSet() allocates one row of `number_wands` pixel wands per
  thread.  On any allocation failure the partially built set is destroyed and
  NULL is returned.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  size_t
    n;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /*
    Zero the table so DestroyPixelsThreadSet() can tell filled slots apart.
  */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
/*
  NewWandView(): allocate a view spanning the wand's first image in full
  (columns x rows at offset 0,0).  Allocation failures raise a fatal wand
  exception rather than returning NULL.
*/
WandExport WandView *NewWandView(MagickWand *wand)
{
WandView
*wand_view;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
if (wand_view == (WandView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
wand_view->id=AcquireWandId();
(void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
WandViewId,(double) wand_view->id);
wand_view->description=ConstantString("WandView");
/* Note: wand is assigned before the cache view is acquired from its images. */
wand_view->wand=wand;
wand_view->exception=AcquireExceptionInfo();
wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
wand_view->exception);
wand_view->extent.width=wand->images->columns;
wand_view->extent.height=wand->images->rows;
/* One row of pixel wands per thread, each as wide as the view. */
wand_view->number_threads=GetOpenMPMaximumThreads();
wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
wand_view->number_threads);
if (wand_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
wand_view->debug=IsEventLogging();
wand_view->signature=WandSignature;
return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of an extent of
% pixel_wands view.
%
*/
/*
  NewWandViewExtent(): allocate a view covering only the given rectangle of
  the wand's first image.  Allocation failures raise a fatal wand exception.
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    Bug fix: assign the wand BEFORE acquiring the cache view.  The previous
    order read wand_view->wand->images while wand_view->wand was still NULL
    (the struct had just been zeroed), dereferencing a null pointer.
  */
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  /*
    One row of pixel wands per thread, each as wide as the view.
  */
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with an image view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *image_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
/*
  SetWandViewDescription() replaces the view's description string.
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  /*
    Bug fix: release the previous description (NewWandView always installs
    one) before overwriting it; the old code leaked it.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/*
  SetWandViewIterator(): write-only, scanline-parallel iteration.  The pixel
  wands start undefined; whatever the user `set` callback stores in them is
  synced back to the destination image row by row.  Returns MagickFalse if
  any row fails or the callback returns MagickFalse.
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
SetWandViewMethod set,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (WandView *) NULL);
assert(destination->signature == WandSignature);
if (set == (SetWandViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
/* The destination is written to, so it must own its pixels (DirectClass). */
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (destination->extent.height-destination->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
/* A failure on any earlier row aborts the remaining rows. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(destination->view);
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
/* Copy the wands the callback filled in back into the cache row. */
for (x=0; x < (ssize_t) destination->extent.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->extent.width; x++)
SetPixelBlack(indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across the OpenMP team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_SetWandViewIterator)
#endif
proceed=SetImageProgress(destination_image,destination->description,
progress++,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewThreads() sets the number of threads in a thread team.
%
% The format of the SetWandViewThreads method is:
%
% void SetWandViewThreads(WandView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
/*
  SetWandViewThreads() sets the number of threads in the view's thread team,
  clamped back to the OpenMP maximum when it exceeds the thread resource
  limit.
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  assert(image_view != (WandView *) NULL);
  /*
    Consistency fix: every other method in this file validates against
    WandSignature; this one checked MagickSignature.
  */
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  TransferWandViewIterator() walks every scanline of the view, loads the
  source pixel wands, invokes the user transfer callback, and syncs the
  destination pixel wands back to the destination image.  The loop is
  parallelized with OpenMP when available; `status` collapses any per-row
  failure into the MagickFalse return value.
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /*
    The destination is written to, so it must own its pixels (DirectClass).
  */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /*
      Load the source scanline into this thread's pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      Bug fix: pre-fill the destination wands from the DESTINATION row.  The
      old code read `pixels`/`indexes` (the source row) here, which loads the
      wrong data and can overrun the source row when the two extents differ
      in width; the duplex iterator already used the destination buffers.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Sync the (possibly updated) destination wands back to the pixel cache.
    */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: inherit the exception from the view that failed to sync
          (destination->view), not source->view.
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  UpdateWandViewIterator(): read-modify-write, scanline-parallel iteration.
  Each row is loaded into the thread's pixel wands, handed to the user
  `update` callback, and the (possibly modified) wands are synced back to the
  image.  Returns MagickFalse if any row fails or the callback returns
  MagickFalse.
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
UpdateWandViewMethod update,void *context)
{
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdateWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* The image is modified in place, so it must own its pixels (DirectClass). */
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
/* A failure on any earlier row aborts the remaining rows. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
/* Load the row into the thread's pixel wands. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelBlack(indexes+x));
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
/* Copy the callback's updates back into the cache row and sync. */
for (x=0; x < (ssize_t) source->extent.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->extent.width; x++)
SetPixelBlack(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* progress++ must be serialized across the OpenMP team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
logger.h | /*
* logger.h
*
* Created on: 2011/04/11
* Author: shu
*/
#ifndef LOGGER_H_
#define LOGGER_H_
#include <iostream>
#include <ostream>
#include <string>
// Process-wide console logger (singleton).  Messages go to std::cout with an
// optional "error : "/"warning : " prefix.  NOTE(review): the commented-out
// `#pragma omp critical` lines suggest this was meant to be OpenMP-safe;
// interleaved output is possible while they stay disabled — confirm intent.
class Logger {
public:
  // Meyers singleton: constructed on first use.
  static Logger* GetInstance() {
    static Logger instance;
    return &instance;
  }

  // Messages are taken by const reference to avoid copying the string.
  void ErrorLog(const std::string &message) {
    //#pragma omp critical(lock_)
    std::cout << "error : " << message << std::endl;
  }

  void WarningLog(const std::string &message) {
    //#pragma omp critical(lock_)
    std::cout << "warning : " << message << std::endl;
  }

  void Log(const std::string &message) {
    //#pragma omp critical(lock_)
    std::cout << message << std::endl;
  }

private:
  Logger() {
  }
  ~Logger() {
  }
  // Non-copyable: declared but never defined (pre-C++11 idiom).
  Logger(const Logger& rhs);
  // Fix: assignment operator conventionally returns a reference; the old
  // by-value declaration would have required a copy of the singleton.
  Logger& operator=(const Logger& rhs);
};
#endif /* LOGGER_H_ */
|
load_data.h | #ifndef CSKNOW_LOAD_DATA_H
#define CSKNOW_LOAD_DATA_H
#define NUM_PLAYERS 10
#include <string>
#include <set>
#include <vector>
#include <iostream>
#include <unordered_map>
#include <list>
using std::string;
using std::vector;
using std::set;
using std::unordered_map;
using std::vector;
#define CT_TEAM 0
#define T_TEAM 1
#define SPECTATOR 2
/* Inclusive id range [minId, maxId] of related rows — presumably one entry
   per parent row; confirm against the code that builds the index. */
struct RangeIndexEntry {
int64_t minId, maxId;
};
/* A RangeIndex is a plain array of RangeIndexEntry (raw pointer; the code
   that allocates it owns it). */
typedef RangeIndexEntry * RangeIndex;
/* Maps a key id to the list of row ids matching it. */
typedef unordered_map<int64_t, vector<int64_t>> HashmapIndex;
// Base class for column-store tables: common metadata plus the shared id
// column.  Derived classes add their own columns and override init() to
// allocate them.
class ColStore {
public:
    bool beenInitialized = false;
    int64_t size;
    vector<string> fileNames;
    vector<int64_t> gameStarts;
    vector<int64_t> id;

    // Fix: the class is used polymorphically (virtual init, derived classes
    // with non-trivial destructors), so it needs a virtual destructor to make
    // `delete basePtr` well-defined.
    virtual ~ColStore() = default;

    // rows: total row count; numFiles: number of source demo files;
    // gameStarts: first row index of each game.
    virtual void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        beenInitialized = true;
        size = rows;
        fileNames.resize(numFiles);
        this->gameStarts = gameStarts;
        this->id.resize(rows);
    }
};
// Equipment (weapon/item) lookup table: one row per equipment id.
class Equipment : public ColStore {
public:
    char ** name;   // per-row heap C string, owned here, freed in destructor

    // Allocate the name column. NOTE(review): malloc results are unchecked
    // throughout this file; an allocation failure crashes on first use.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        name = (char **) malloc(rows * sizeof(char*));
    }
    Equipment() { };
    ~Equipment() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        for (int64_t row = 0; row < size; row++) {
            free(name[row]);
        }
        free(name);
    }
    Equipment(const Equipment& other) = delete;
    Equipment& operator=(const Equipment& other) = delete;
};
// Game-type lookup table: one row per game type name.
class GameTypes : public ColStore {
public:
    char ** tableType;   // per-row heap C string, owned here

    // Allocate the tableType column.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        tableType = (char **) malloc(rows * sizeof(char*));
    }
    GameTypes() { };
    ~GameTypes() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        for (int64_t row = 0; row < size; row++) {
            free(tableType[row]);
        }
        free(tableType);
    }
    GameTypes(const GameTypes& other) = delete;
    GameTypes& operator=(const GameTypes& other) = delete;
};
// Hit-group lookup table (head, chest, etc.): one row per group name.
class HitGroups : public ColStore {
public:
    char ** groupName;   // per-row heap C string, owned here

    // Allocate the groupName column.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        groupName = (char **) malloc(rows * sizeof(char*));
    }
    HitGroups() { };
    ~HitGroups() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        for (int64_t row = 0; row < size; row++) {
            free(groupName[row]);
        }
        free(groupName);
    }
    HitGroups(const HitGroups& other) = delete;
    HitGroups& operator=(const HitGroups& other) = delete;
};
// Per-game metadata: demo file name, tick rates, map, game type, and
// row ranges into the rounds and players tables.
class Games : public ColStore {
public:
    char ** demoFile;         // per-row heap C string, owned here
    double * demoTickRate;
    double * gameTickRate;
    char ** mapName;          // per-row heap C string, owned here
    int64_t * gameType;       // index into GameTypes
    RangeIndex roundsPerGame;   // rounds belonging to each game
    RangeIndex playersPerGame;  // players belonging to each game

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        demoFile = (char **) malloc(rows * sizeof(char*));
        demoTickRate = (double *) malloc(rows * sizeof(double));
        gameTickRate = (double *) malloc(rows * sizeof(double));
        mapName = (char **) malloc(rows * sizeof(char*));
        gameType = (int64_t *) malloc(rows * sizeof(int64_t));
        roundsPerGame = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
        playersPerGame = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
    }
    Games() { };
    ~Games() {
        if (!beenInitialized){
            return;
        }
        for (int64_t row = 0; row < size; row++) {
            free(demoFile[row]);
            // Fixed: per-row mapName strings were previously leaked —
            // only the pointer array was freed, unlike demoFile.
            free(mapName[row]);
        }
        free(demoFile);
        free(demoTickRate);
        free(gameTickRate);
        free(mapName);
        free(gameType);
        free(roundsPerGame);
        free(playersPerGame);
    }
    Games(const Games& other) = delete;
    Games& operator=(const Games& other) = delete;
};
// Per-player table: one row per player per game.
class Players : public ColStore {
public:
    int64_t * gameId;   // owning game row
    char ** name;       // per-row heap C string, owned here
    int64_t * steamId;
    // add this offset to id to get the row entry
    int64_t idOffset = 1;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        gameId = (int64_t *) malloc(rows * sizeof(int64_t));
        name = (char **) malloc(rows * sizeof(char*));
        steamId = (int64_t *) malloc(rows * sizeof(int64_t));
    }
    Players() { };
    ~Players() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        for (int64_t row = 0; row < size; row++) {
            free(name[row]);
        }
        free(name);
        free(gameId);
        free(steamId);
    }
    Players(const Players& other) = delete;
    Players& operator=(const Players& other) = delete;
};
// Per-round table: one row per round of a game.
class Rounds : public ColStore {
public:
    int64_t * gameId;          // owning game row
    int64_t * startTick;
    int64_t * endTick;
    bool * warmup;             // true if this is a warmup round
    int64_t * freezeTimeEnd;
    int16_t * roundNumber;
    int16_t * roundEndReason;
    int16_t * winner;          // winning team (see CT_TEAM / T_TEAM)
    int16_t * tWins;           // running score after this round
    int16_t * ctWins;
    RangeIndex ticksPerRound;  // tick rows belonging to each round
    // add this offset to id to get the row entry
    int64_t idOffset = 1;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        gameId = (int64_t *) malloc(rows * sizeof(int64_t));
        startTick = (int64_t *) malloc(rows * sizeof(int64_t));
        endTick = (int64_t *) malloc(rows * sizeof(int64_t));
        warmup = (bool *) malloc(rows * sizeof(bool));
        freezeTimeEnd = (int64_t *) malloc(rows * sizeof(int64_t));
        roundNumber = (int16_t *) malloc(rows * sizeof(int16_t));
        roundEndReason = (int16_t *) malloc(rows * sizeof(int16_t));
        winner = (int16_t *) malloc(rows * sizeof(int16_t));
        tWins = (int16_t *) malloc(rows * sizeof(int16_t));
        ctWins = (int16_t *) malloc(rows * sizeof(int16_t));
        ticksPerRound = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
    }
    Rounds() { };
    ~Rounds() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(gameId);
        free(startTick);
        free(endTick);
        free(warmup);
        free(freezeTimeEnd);
        free(roundNumber);
        free(roundEndReason);
        free(winner);
        free(tWins);
        free(ctWins);
        free(ticksPerRound);
    }
    Rounds(const Rounds& other) = delete;
    Rounds& operator=(const Rounds& other) = delete;
};
// Per-tick table: one row per demo tick, plus indexes from each tick to the
// event rows (kills, grenades, plants, ...) that occurred on it.
class Ticks: public ColStore {
public:
    int64_t * roundId;          // owning round row
    int64_t * gameTime;
    int64_t * demoTickNumber;
    int64_t * gameTickNumber;
    int64_t * bombCarrier;      // player row carrying the bomb
    double * bombX;
    double * bombY;
    double * bombZ;
    // Range indexes (malloc'd arrays, freed in the destructor).
    RangeIndex patPerTick;
    RangeIndex spottedPerTick;
    // Hashmap indexes are std containers; they clean up automatically.
    HashmapIndex weaponFirePerTick;
    HashmapIndex killsPerTick;
    HashmapIndex hurtPerTick;
    HashmapIndex grenadesPerTick;
    HashmapIndex grenadesThrowPerTick;
    HashmapIndex grenadesActivePerTick;
    HashmapIndex grenadesExpiredPerTick;
    HashmapIndex grenadesDestroyedPerTick;
    HashmapIndex flashedPerTick;
    HashmapIndex plantsPerTick;
    HashmapIndex plantsStartPerTick;
    HashmapIndex plantsEndPerTick;
    HashmapIndex defusalsPerTick;
    HashmapIndex defusalsStartPerTick;
    HashmapIndex defusalsEndPerTick;
    HashmapIndex explosionsPerTick;

    // Allocate one column array per plain field plus the two range indexes.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        roundId = (int64_t *) malloc(rows * sizeof(int64_t));
        gameTime = (int64_t *) malloc(rows * sizeof(int64_t));
        demoTickNumber = (int64_t *) malloc(rows * sizeof(int64_t));
        gameTickNumber = (int64_t *) malloc(rows * sizeof(int64_t));
        bombCarrier = (int64_t *) malloc(rows * sizeof(int64_t));
        bombX = (double *) malloc(rows * sizeof(double));
        bombY = (double *) malloc(rows * sizeof(double));
        bombZ = (double *) malloc(rows * sizeof(double));
        patPerTick = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
        spottedPerTick = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
    }
    Ticks() { };
    ~Ticks() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(roundId);
        free(gameTime);
        free(demoTickNumber);
        free(gameTickNumber);
        free(bombCarrier);
        free(bombX);
        free(bombY);
        free(bombZ);
        free(patPerTick);
        free(spottedPerTick);
    }
    Ticks(const Ticks& other) = delete;
    Ticks& operator=(const Ticks& other) = delete;
    // (Removed stale commented-out helpers that referenced members —
    // players, firstRowAfterWarmup — which do not exist on this class.)
};
// Player state snapshot: one row per (player, tick) pair — position, view
// angles, health, inventory, and economy.
class PlayerAtTick: public ColStore {
public:
    int64_t * playerId;   // player row
    int64_t * tickId;     // tick row
    double * posX;
    double * posY;
    double * posZ;
    double * viewX;       // yaw
    double * viewY;       // pitch (see makePitchNeg90To90)
    int16_t * team;
    double * health;
    double * armor;
    bool * hasHelmet;
    bool * isAlive;
    bool * isCrouching;
    bool * isAirborne;
    double * remainingFlashTime;
    int16_t * activeWeapon;
    int16_t * primaryWeapon;
    int16_t * primaryBulletsClip;
    int16_t * primaryBulletsReserve;
    int16_t * secondaryWeapon;
    int16_t * secondaryBulletsClip;
    int16_t * secondaryBulletsReserve;
    int16_t * numHe;
    int16_t * numFlash;
    int16_t * numSmoke;
    int16_t * numMolotov;
    int16_t * numIncendiary;
    int16_t * numDecoy;
    int16_t * numZeus;
    bool * hasDefuser;
    bool * hasBomb;
    int32_t * money;
    int32_t * ping;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        playerId = (int64_t *) malloc(rows * sizeof(int64_t));
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
        posX = (double *) malloc(rows * sizeof(double));
        posY = (double *) malloc(rows * sizeof(double));
        posZ = (double *) malloc(rows * sizeof(double));
        viewX = (double *) malloc(rows * sizeof(double));
        viewY = (double *) malloc(rows * sizeof(double));
        team = (int16_t *) malloc(rows * sizeof(int16_t));
        health = (double *) malloc(rows * sizeof(double));
        armor = (double *) malloc(rows * sizeof(double));
        hasHelmet = (bool *) malloc(rows * sizeof(bool));
        isAlive = (bool *) malloc(rows * sizeof(bool));
        isCrouching = (bool *) malloc(rows * sizeof(bool));
        isAirborne = (bool *) malloc(rows * sizeof(bool));
        remainingFlashTime = (double *) malloc(rows * sizeof(double));
        activeWeapon = (int16_t *) malloc(rows * sizeof(int16_t));
        primaryWeapon = (int16_t *) malloc(rows * sizeof(int16_t));
        primaryBulletsClip = (int16_t *) malloc(rows * sizeof(int16_t));
        primaryBulletsReserve = (int16_t *) malloc(rows * sizeof(int16_t));
        secondaryWeapon = (int16_t *) malloc(rows * sizeof(int16_t));
        secondaryBulletsClip = (int16_t *) malloc(rows * sizeof(int16_t));
        secondaryBulletsReserve = (int16_t *) malloc(rows * sizeof(int16_t));
        numHe = (int16_t *) malloc(rows * sizeof(int16_t));
        numFlash = (int16_t *) malloc(rows * sizeof(int16_t));
        numSmoke = (int16_t *) malloc(rows * sizeof(int16_t));
        numMolotov = (int16_t *) malloc(rows * sizeof(int16_t));
        numIncendiary = (int16_t *) malloc(rows * sizeof(int16_t));
        numDecoy = (int16_t *) malloc(rows * sizeof(int16_t));
        numZeus = (int16_t *) malloc(rows * sizeof(int16_t));
        hasDefuser = (bool *) malloc(rows * sizeof(bool));
        hasBomb = (bool *) malloc(rows * sizeof(bool));
        money = (int32_t *) malloc(rows * sizeof(int32_t));
        ping = (int32_t *) malloc(rows * sizeof(int32_t));
    }

    // Shift pitch values stored on a 0-360 wheel down into a signed range.
    // NOTE(review): the name says [-90, 90] but the threshold is 260, which
    // maps (260, 360] to (-100, 0] — confirm the intended cutoff (90? 180?).
    void makePitchNeg90To90() {
#pragma omp parallel for
        for (int64_t i = 0; i < size; i++) {
            if (viewY[i] > 260.0) {
                viewY[i] -= 360;
            }
        }
    }
    PlayerAtTick() { };
    ~PlayerAtTick() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(playerId);
        free(tickId);
        free(posX);
        free(posY);
        free(posZ);
        free(viewX);
        free(viewY);
        free(team);
        free(health);
        free(armor);
        free(hasHelmet);
        free(isAlive);
        free(isCrouching);
        free(isAirborne);
        free(remainingFlashTime);
        free(activeWeapon);
        free(primaryWeapon);
        free(primaryBulletsClip);
        free(primaryBulletsReserve);
        free(secondaryWeapon);
        free(secondaryBulletsClip);
        free(secondaryBulletsReserve);
        free(numHe);
        free(numFlash);
        free(numSmoke);
        free(numMolotov);
        free(numIncendiary);
        free(numDecoy);
        free(numZeus);
        free(hasDefuser);
        free(hasBomb);
        free(money);
        free(ping);
    }
    PlayerAtTick(const PlayerAtTick& other) = delete;
    PlayerAtTick& operator=(const PlayerAtTick& other) = delete;
    // (Removed stale commented-out helpers that referenced members —
    // players, firstRowAfterWarmup — which do not exist on this class.)
};
// Spotted events: rows recording when one player (un)spots another.
class Spotted : public ColStore {
public:
    int64_t * tickId;
    int64_t * spottedPlayer;
    int64_t * spotterPlayer;
    bool * isSpotted;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
        spottedPlayer = (int64_t *) malloc(rows * sizeof(int64_t));
        spotterPlayer = (int64_t *) malloc(rows * sizeof(int64_t));
        isSpotted = (bool *) malloc(rows * sizeof(bool));
    }

    Spotted() { }

    // A default-constructed (never-init'd) table owns no memory.
    ~Spotted() {
        if (!beenInitialized) {
            return;
        }
        free(isSpotted);
        free(spotterPlayer);
        free(spottedPlayer);
        free(tickId);
    }

    Spotted(const Spotted&) = delete;
    Spotted& operator=(const Spotted&) = delete;
};
// Weapon-fire events: one row per shot.
class WeaponFire : public ColStore {
public:
    int64_t * tickId;
    int64_t * shooter;
    int16_t * weapon;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
        shooter = (int64_t *) malloc(rows * sizeof(int64_t));
        weapon = (int16_t *) malloc(rows * sizeof(int16_t));
    }

    WeaponFire() { }

    // A default-constructed (never-init'd) table owns no memory.
    ~WeaponFire() {
        if (!beenInitialized) {
            return;
        }
        free(weapon);
        free(shooter);
        free(tickId);
    }

    WeaponFire(const WeaponFire&) = delete;
    WeaponFire& operator=(const WeaponFire&) = delete;
};
// Kill events: one row per kill, with killer/victim/assister player rows.
class Kills : public ColStore {
public:
    int64_t * tickId;
    int64_t * killer;
    int64_t * victim;
    int16_t * weapon;
    int64_t * assister;             // -1 presumably means no assister — confirm in loadData
    bool * isHeadshot;
    bool * isWallbang;
    int32_t * penetratedObjects;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
        killer = (int64_t *) malloc(rows * sizeof(int64_t));
        victim = (int64_t *) malloc(rows * sizeof(int64_t));
        weapon = (int16_t *) malloc(rows * sizeof(int16_t));
        assister = (int64_t *) malloc(rows * sizeof(int64_t));
        // Fixed: these two were allocated with sizeof(int64_t), silently
        // over-allocating the bool columns 8x.
        isHeadshot = (bool *) malloc(rows * sizeof(bool));
        isWallbang = (bool *) malloc(rows * sizeof(bool));
        penetratedObjects = (int32_t *) malloc(rows * sizeof(int32_t));
    }
    Kills() { };
    ~Kills() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(tickId);
        free(killer);
        free(victim);
        free(weapon);
        free(assister);
        free(isHeadshot);
        free(isWallbang);
        free(penetratedObjects);
    }
    Kills(const Kills& other) = delete;
    Kills& operator=(const Kills& other) = delete;
};
// Damage events: one row per player-hurt event.
class Hurt : public ColStore {
public:
    int64_t * tickId;
    int64_t * victim;
    int64_t * attacker;
    int16_t * weapon;
    int32_t * armorDamage;    // armor lost this event
    int32_t * armor;          // armor remaining after
    int32_t * healthDamage;   // health lost this event
    int32_t * health;         // health remaining after
    int64_t * hitGroup;       // index into HitGroups

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
        victim = (int64_t *) malloc(rows * sizeof(int64_t));
        attacker = (int64_t *) malloc(rows * sizeof(int64_t));
        weapon = (int16_t *) malloc(rows * sizeof(int16_t));
        armorDamage = (int32_t *) malloc(rows * sizeof(int32_t));
        armor = (int32_t *) malloc(rows * sizeof(int32_t));
        healthDamage = (int32_t *) malloc(rows * sizeof(int32_t));
        health = (int32_t *) malloc(rows * sizeof(int32_t));
        hitGroup = (int64_t *) malloc(rows * sizeof(int64_t));
    }
    Hurt() { };
    ~Hurt() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(tickId);
        free(victim);
        free(attacker);
        free(weapon);
        free(armorDamage);
        free(armor);
        free(healthDamage);
        free(health);
        free(hitGroup);
    }
    Hurt(const Hurt& other) = delete;
    Hurt& operator=(const Hurt& other) = delete;
};
// Grenade lifecycle: one row per thrown grenade with its key tick ids.
class Grenades : public ColStore {
public:
    int64_t * thrower;
    int16_t * grenadeType;
    int64_t * throwTick;
    int64_t * activeTick;
    int64_t * expiredTick;
    int64_t * destroyTick;
    RangeIndex flashedPerGrenade;     // Flashed rows caused by each grenade
    RangeIndex trajectoryPerGrenade;  // GrenadeTrajectories rows per grenade

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        thrower = (int64_t *) malloc(rows * sizeof(int64_t));
        grenadeType = (int16_t *) malloc(rows * sizeof(int16_t));
        throwTick = (int64_t *) malloc(rows * sizeof(int64_t));
        activeTick = (int64_t *) malloc(rows * sizeof(int64_t));
        expiredTick = (int64_t *) malloc(rows * sizeof(int64_t));
        destroyTick = (int64_t *) malloc(rows * sizeof(int64_t));
        flashedPerGrenade = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
        trajectoryPerGrenade = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
    }
    Grenades() { };
    ~Grenades() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(thrower);
        free(grenadeType);
        free(throwTick);
        free(activeTick);
        free(expiredTick);
        free(destroyTick);
        free(flashedPerGrenade);
        free(trajectoryPerGrenade);
    }
    Grenades(const Grenades& other) = delete;
    Grenades& operator=(const Grenades& other) = delete;
};
// Flash events: one row per player blinded by a flashbang.
class Flashed : public ColStore {
public:
    int64_t * tickId;
    int64_t * grenadeId;   // the flashbang (Grenades row)
    int64_t * thrower;
    int64_t * victim;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
        grenadeId = (int64_t *) malloc(rows * sizeof(int64_t));
        thrower = (int64_t *) malloc(rows * sizeof(int64_t));
        victim = (int64_t *) malloc(rows * sizeof(int64_t));
    }

    Flashed() { }

    // A default-constructed (never-init'd) table owns no memory.
    ~Flashed() {
        if (!beenInitialized) {
            return;
        }
        free(victim);
        free(thrower);
        free(grenadeId);
        free(tickId);
    }

    Flashed(const Flashed&) = delete;
    Flashed& operator=(const Flashed&) = delete;
};
// Grenade trajectory samples: one row per recorded position of a grenade.
class GrenadeTrajectories : public ColStore {
public:
    int64_t * grenadeId;     // owning Grenades row
    int32_t * idPerGrenade;  // sample index within this grenade's trajectory
    double * posX;
    double * posY;
    double * posZ;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        grenadeId = (int64_t *) malloc(rows * sizeof(int64_t));
        idPerGrenade = (int32_t *) malloc(rows * sizeof(int32_t));
        posX = (double *) malloc(rows * sizeof(double));
        posY = (double *) malloc(rows * sizeof(double));
        posZ = (double *) malloc(rows * sizeof(double));
    }
    GrenadeTrajectories() { };
    ~GrenadeTrajectories() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(grenadeId);
        free(idPerGrenade);
        free(posX);
        free(posY);
        free(posZ);
    }
    GrenadeTrajectories(const GrenadeTrajectories& other) = delete;
    GrenadeTrajectories& operator=(const GrenadeTrajectories& other) = delete;
};
// Bomb-plant attempts: one row per plant (successful or not).
class Plants : public ColStore {
public:
    int64_t * startTick;
    int64_t * endTick;
    int64_t * planter;
    // NOTE: field name is misspelled ("succesful") but kept — callers use it.
    bool * succesful;
    RangeIndex defusalsPerGrenade;   // Defusals rows for this plant
    RangeIndex explosionsPerGrenade; // Explosions rows for this plant

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        startTick = (int64_t *) malloc(rows * sizeof(int64_t));
        endTick = (int64_t *) malloc(rows * sizeof(int64_t));
        planter = (int64_t *) malloc(rows * sizeof(int64_t));
        succesful = (bool *) malloc(rows * sizeof(bool));
        defusalsPerGrenade = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
        explosionsPerGrenade = (RangeIndexEntry *) malloc(rows * sizeof(RangeIndexEntry));
    }
    Plants() { };
    ~Plants() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(startTick);
        free(endTick);
        free(planter);
        free(succesful);
        free(defusalsPerGrenade);
        free(explosionsPerGrenade);
    }
    Plants(const Plants& other) = delete;
    Plants& operator=(const Plants& other) = delete;
};
// Bomb-defuse attempts: one row per defusal (successful or not).
class Defusals : public ColStore {
public:
    int64_t * plantId;   // owning Plants row
    int64_t * startTick;
    int64_t * endTick;
    int64_t * defuser;
    // NOTE: field name is misspelled ("succesful") but kept — callers use it.
    bool * succesful;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        plantId = (int64_t *) malloc(rows * sizeof(int64_t));
        startTick = (int64_t *) malloc(rows * sizeof(int64_t));
        endTick = (int64_t *) malloc(rows * sizeof(int64_t));
        defuser = (int64_t *) malloc(rows * sizeof(int64_t));
        succesful = (bool *) malloc(rows * sizeof(bool));
    }
    Defusals() { };
    ~Defusals() {
        // Nothing was allocated unless init() ran.
        if (!beenInitialized){
            return;
        }
        free(plantId);
        free(startTick);
        free(endTick);
        free(defuser);
        free(succesful);
    }
    Defusals(const Defusals& other) = delete;
    Defusals& operator=(const Defusals& other) = delete;
};
// Bomb explosions: one row per detonation, linked to its plant.
class Explosions : public ColStore {
public:
    int64_t * plantId;   // owning Plants row
    int64_t * tickId;

    // Allocate one column array per field.
    void init(int64_t rows, int64_t numFiles, vector<int64_t> gameStarts) {
        ColStore::init(rows, numFiles, gameStarts);
        plantId = (int64_t *) malloc(rows * sizeof(int64_t));
        tickId = (int64_t *) malloc(rows * sizeof(int64_t));
    }

    Explosions() { }

    // A default-constructed (never-init'd) table owns no memory.
    ~Explosions() {
        if (!beenInitialized) {
            return;
        }
        free(tickId);
        free(plantId);
    }

    Explosions(const Explosions&) = delete;
    Explosions& operator=(const Explosions&) = delete;
};
void loadData(Equipment & equipment, GameTypes & gameTypes, HitGroups & hitGroups, Games & games, Players & players,
Rounds & rounds, Ticks & ticks, PlayerAtTick & playerAtTick, Spotted & spotted, WeaponFire & weaponFire,
Kills & kills, Hurt & hurt, Grenades & grenades, Flashed & flashed, GrenadeTrajectories & grenadeTrajectories,
Plants & plants, Defusals & defusals, Explosions & explosions, string dataPath);
void buildIndexes(Equipment & equipment, GameTypes & gameTypes, HitGroups & hitGroups, Games & games,
Players & players, Rounds & rounds, Ticks & ticks, PlayerAtTick & playerAtTick, Spotted & spotted,
WeaponFire & weaponFire, Kills & kills, Hurt & hurt, Grenades & grenades, Flashed & flashed,
GrenadeTrajectories & grenadeTrajectories, Plants & plants, Defusals & defusals, Explosions & explosions);
#endif //CSKNOW_LOAD_DATA_H
|
bodysystemcpu_impl.h | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "bodysystemcpu.h"
#include <assert.h>
#include <memory.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <algorithm>
#include "tipsy.h"
#ifdef OPENMP
#include <omp.h>
#endif
// Construct an n-body system on the CPU and allocate its buffers.
// Softening and damping use the sample's standard defaults.
template <typename T>
BodySystemCPU<T>::BodySystemCPU(int numBodies)
    : m_numBodies(numBodies),
      m_bInitialized(false),
      m_pos(0),
      m_vel(0),
      m_force(0),
      m_softeningSquared(.00125f),
      m_damping(0.995f)
{
    _initialize(numBodies);
}
// Release all buffers; the system must currently be initialized
// (_finalize asserts this).
template <typename T>
BodySystemCPU<T>::~BodySystemCPU()
{
    _finalize();
    m_numBodies = 0;
}
// Allocate zeroed state buffers: xyzw position and velocity (4 per body)
// and xyz force (3 per body).
template <typename T>
void BodySystemCPU<T>::_initialize(int numBodies)
{
    assert(!m_bInitialized);

    m_numBodies = numBodies;

    // Value-initialization zeroes the arrays (equivalent to the memset
    // the previous version used for arithmetic T).
    m_pos   = new T[m_numBodies*4]();
    m_vel   = new T[m_numBodies*4]();
    m_force = new T[m_numBodies*3]();

    m_bInitialized = true;
}
// Free the state buffers allocated by _initialize and mark the system
// uninitialized so it can be re-initialized (e.g. by loadTipsyFile).
template <typename T>
void BodySystemCPU<T>::_finalize()
{
    assert(m_bInitialized);

    delete [] m_pos;
    delete [] m_vel;
    delete [] m_force;

    m_bInitialized = false;
}
// Load particle positions/velocities from a Tipsy snapshot and
// (re)initialize the system with the body count read from the file.
template <typename T>
void BodySystemCPU<T>::loadTipsyFile(const std::string &filename)
{
    if (m_bInitialized)
        _finalize();

    vector< typename vec4<T>::Type > positions;
    vector< typename vec4<T>::Type > velocities;
    vector< int> ids;

    int nBodies = 0;
    int nFirst=0, nSecond=0, nThird=0;   // per-species counts; unused here

    read_tipsy_file(positions,
                    velocities,
                    ids,
                    filename,
                    nBodies,
                    nFirst,
                    nSecond,
                    nThird);

    _initialize(nBodies);

    // NOTE(review): size uses sizeof(vec4<T>) rather than
    // sizeof(typename vec4<T>::Type); these only match if vec4<T> itself is
    // the 4-wide POD — verify against tipsy.h, otherwise too few bytes copy.
    memcpy(m_pos, &positions[0], sizeof(vec4<T>)*nBodies);
    memcpy(m_vel, &velocities[0], sizeof(vec4<T>)*nBodies);
}
// Advance the simulation by one time step of length deltaTime.
template <typename T>
void BodySystemCPU<T>::update(T deltaTime)
{
    assert(m_bInitialized);

    _integrateNBodySystem(deltaTime);

    //std::swap(m_currentRead, m_currentWrite);
}
// Return a pointer to the requested state buffer: velocities for
// BODYSYSTEM_VELOCITY, positions for anything else (matching the
// original switch, whose default case selected position).
template <typename T>
T *BodySystemCPU<T>::getArray(BodyArray array)
{
    assert(m_bInitialized);

    return (array == BODYSYSTEM_VELOCITY) ? m_vel : m_pos;
}
// Copy m_numBodies 4-component elements from data into the requested
// state buffer: velocities for BODYSYSTEM_VELOCITY, positions otherwise
// (matching the original switch, whose default case selected position).
template <typename T>
void BodySystemCPU<T>::setArray(BodyArray array, const T *data)
{
    assert(m_bInitialized);

    T *target = (array == BODYSYSTEM_VELOCITY) ? m_vel : m_pos;
    memcpy(target, data, m_numBodies*4*sizeof(T));
}
// Precision-matched square root: the generic version uses the double
// overload; the float specialization uses sqrtf to avoid the
// float -> double -> float round trip.
template<typename T>
T sqrt_T(T x)
{
    return sqrt(x);
}

template<>
float sqrt_T<float>(float x)
{
    return sqrtf(x);
}
// Accumulate into accel the softened gravitational acceleration exerted
// on body 0 by body 1. posMass* are {x, y, z, mass}; softeningSquared
// keeps the denominator nonzero for coincident bodies.
template <typename T>
void bodyBodyInteraction(T accel[3], T posMass0[4], T posMass1[4], T softeningSquared)
{
    // Displacement from body 0 to body 1. [3 FLOPS]
    T dx = posMass1[0] - posMass0[0];
    T dy = posMass1[1] - posMass0[1];
    T dz = posMass1[2] - posMass0[2];

    // Softened squared distance d^2 + e^2. [6 FLOPS]
    T distSqr = dx * dx + dy * dy + dz * dz + softeningSquared;

    // 1 / (d^2 + e^2)^(3/2). [4 FLOPS]
    T invDist = (T)1.0 / (T)sqrt((double)distSqr);
    T invDistCube = invDist * invDist * invDist;

    // s = m_1 * invDistCube. [1 FLOP]
    T scale = posMass1[3] * invDistCube;

    // a += r * s. [6 FLOPS]
    accel[0] += dx * scale;
    accel[1] += dy * scale;
    accel[2] += dz * scale;
}
// All-pairs O(n^2) gravity: accumulate the force on each body from every
// body (including itself — the self term contributes zero because r = 0).
template <typename T>
void BodySystemCPU<T>::_computeNBodyGravitation()
{
#ifdef OPENMP
    #pragma omp parallel for
#endif

    for (int i = 0; i < m_numBodies; i++)
    {
        int indexForce = 3*i;

        T acc[3] = {0, 0, 0};

        // We unroll this loop 4X for a small performance boost.
        // Fixed: the original ran all four unrolled interactions whenever
        // j < m_numBodies, reading past the end of m_pos when m_numBodies
        // is not a multiple of 4. Stop the unrolled loop at the largest
        // multiple of 4 and handle the remainder one body at a time.
        int j = 0;
        int unrollEnd = m_numBodies & ~3;

        while (j < unrollEnd)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
        }

        // Remainder: 0-3 trailing bodies.
        while (j < m_numBodies)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
        }

        m_force[indexForce  ] = acc[0];
        m_force[indexForce+1] = acc[1];
        m_force[indexForce+2] = acc[2];
    }
}
// One leapfrog-style Euler step: recompute forces, then update each body's
// velocity (with damping) and position in place.
template <typename T>
void BodySystemCPU<T>::_integrateNBodySystem(T deltaTime)
{
    _computeNBodyGravitation();

#ifdef OPENMP
    #pragma omp parallel for
#endif

    for (int i = 0; i < m_numBodies; ++i)
    {
        int index = 4*i;        // xyzw layout for pos/vel
        int indexForce = 3*i;   // xyz layout for force

        T pos[3], vel[3], force[3];
        pos[0] = m_pos[index+0];
        pos[1] = m_pos[index+1];
        pos[2] = m_pos[index+2];
        // w component stores the inverse mass.
        T invMass = m_pos[index+3];

        vel[0] = m_vel[index+0];
        vel[1] = m_vel[index+1];
        vel[2] = m_vel[index+2];

        force[0] = m_force[indexForce+0];
        force[1] = m_force[indexForce+1];
        force[2] = m_force[indexForce+2];

        // acceleration = force / mass;
        // new velocity = old velocity + acceleration * deltaTime
        vel[0] += (force[0] * invMass) * deltaTime;
        vel[1] += (force[1] * invMass) * deltaTime;
        vel[2] += (force[2] * invMass) * deltaTime;

        // Damping models energy loss and keeps the system stable.
        vel[0] *= m_damping;
        vel[1] *= m_damping;
        vel[2] *= m_damping;

        // new position = old position + velocity * deltaTime
        pos[0] += vel[0] * deltaTime;
        pos[1] += vel[1] * deltaTime;
        pos[2] += vel[2] * deltaTime;

        m_pos[index+0] = pos[0];
        m_pos[index+1] = pos[1];
        m_pos[index+2] = pos[2];

        m_vel[index+0] = vel[0];
        m_vel[index+1] = vel[1];
        m_vel[index+2] = vel[2];
    }
}
|
multiimagereconstructor.h | #ifndef MULTILINEARRECONSTRUCTION_MULTIIMAGERECONSTRUCTOR_H
#define MULTILINEARRECONSTRUCTION_MULTIIMAGERECONSTRUCTOR_H
#ifndef MKL_BLAS
#define MKL_BLAS MKL_DOMAIN_BLAS
#endif
#define EIGEN_USE_MKL_ALL
#include <eigen3/Eigen/Dense>
#include <eigen3/Eigen/Geometry>
#include <eigen3/Eigen/LU>
#include "ceres/ceres.h"
#include <opencv2/opencv.hpp>
#include "basicmesh.h"
#include "common.h"
#include "constraints.h"
#include "costfunctions.h"
#include "multilinearmodel.h"
#include "parameters.h"
#include "singleimagereconstructor.hpp"
#include "statsutils.h"
#include "utils.hpp"
#include "OffscreenMeshVisualizer.h"
#include "AAM/aammodel.h"
#include "boost/filesystem/operations.hpp"
#include "boost/filesystem/path.hpp"
namespace fs = boost::filesystem;
using namespace Eigen;
namespace {
// Per-pixel rasterization record: which mesh triangle covers the pixel
// and where, in barycentric coordinates.
struct PixelInfo {
    PixelInfo() : fidx(-1) {}   // -1 marks "no triangle"
    PixelInfo(int fidx, glm::vec3 bcoords) : fidx(fidx), bcoords(bcoords) {}

    int fidx;           // triangle index
    glm::vec3 bcoords;  // barycentric coordinates
};
// Pack a (non-negative, < 2^24) index into an RGB triple, low byte in r.
// Inverse of decode_index.
inline void encode_index(int idx, unsigned char& r, unsigned char& g, unsigned char& b) {
    r = static_cast<unsigned char>(idx & 0xff);
    g = static_cast<unsigned char>((idx >> 8) & 0xff);
    b = static_cast<unsigned char>((idx >> 16) & 0xff);
}
// Reassemble a 24-bit index from an RGB triple (r = low byte), writing it
// to idx and also returning it. Inverse of encode_index.
inline int decode_index(unsigned char r, unsigned char g, unsigned char b, int& idx) {
    idx = (static_cast<int>(b) << 16) | (static_cast<int>(g) << 8) | static_cast<int>(r);
    return idx;
}
// Restrict val to the closed interval [lower, upper].
template <typename T>
T clamp(T val, T lower, T upper) {
    if (val < lower) return lower;
    if (upper < val) return upper;
    return val;
}
// Bilinearly sample img at real-valued (x, y). Returns per-channel RGB in
// [0, 255], or (-1, -1, -1) when the 2x2 neighborhood falls outside the image.
inline glm::dvec3 bilinear_sample(const QImage& img, double x, double y) {
    int x0 = floor(x), x1 = x0 + 1;
    int y0 = floor(y), y1 = y0 + 1;

    if(x0 < 0 || y0 < 0) return glm::dvec3(-1, -1, -1);
    if(x1 >= img.width() || y1 >= img.height()) return glm::dvec3(-1, -1, -1);

    // Fractional offsets and their complements (interpolation weights).
    double c0 = x - x0, c0c = 1 - c0;
    double c1 = y - y0, c1c = 1 - c1;

    // Fetch the 2x2 neighborhood.
    QRgb p00 = img.pixel(x0, y0);
    QRgb p01 = img.pixel(x1, y0);
    QRgb p10 = img.pixel(x0, y1);
    QRgb p11 = img.pixel(x1, y1);

    // Weighted average per channel.
    double r = c0c * c1c * qRed(p00) + c0c * c1 * qRed(p01) + c0 * c1c * qRed(p10) + c0 * c1 * qRed(p11);
    double g = c0c * c1c * qGreen(p00) + c0c * c1 * qGreen(p01) + c0 * c1c * qGreen(p10) + c0 * c1 * qGreen(p11);
    double b = c0c * c1c * qBlue(p00) + c0c * c1 * qBlue(p01) + c0 * c1c * qBlue(p10) + c0 * c1 * qBlue(p11);

    return glm::dvec3(r, g, b);
}
// Scan an index-map render (triangle ids encoded as RGB via encode_index;
// black = background). Returns the set of visible triangle indices and a
// per-pixel map of triangle index (-1 for background), in row-major order.
inline pair<set<int>, vector<int>> FindTrianglesIndices(const QImage& img) {
    int w = img.width(), h = img.height();
    set<int> S;
    vector<int> indices_map(w*h);

    for(int i=0, pidx = 0;i<h;++i) {
        for(int j=0;j<w;++j, ++pidx) {
            QRgb pix = img.pixel(j, i);
            unsigned char r = static_cast<unsigned char>(qRed(pix));
            unsigned char g = static_cast<unsigned char>(qGreen(pix));
            unsigned char b = static_cast<unsigned char>(qBlue(pix));

            // Black means no triangle was rendered at this pixel.
            if(r == 0 && g == 0 && b == 0) {
                indices_map[pidx] = -1;
                continue;
            }
            else {
                int idx;
                decode_index(r, g, b, idx);
                S.insert(idx);
                indices_map[pidx] = idx;
            }
        }
    }

    return make_pair(S, indices_map);
}
// Transfer the color statistics of `target` onto `source` over the given
// valid-pixel sets (row-major pixel indices), using the Reinhard et al.
// approach: RGB -> LMS -> log -> lab, shift by the mean difference,
// then map back. Returns a recolored copy of `source`.
static QImage TransferColor(const QImage& source, const QImage& target,
                            const vector<int>& valid_pixels_s,
                            const vector<int>& valid_pixels_t) {
    // Make a copy
    QImage result = source;

    const int num_rows_s = source.height(), num_cols_s = source.width();
    const int num_rows_t = target.height(), num_cols_t = target.width();
    const size_t num_pixels_s = valid_pixels_s.size();
    const size_t num_pixels_t = valid_pixels_t.size();

    // Fixed RGB<->LMS conversion matrices (Reinhard et al. constants).
    Matrix3d RGB2LMS, LMS2RGB;
    RGB2LMS << 0.3811, 0.5783, 0.0402,
               0.1967, 0.7244, 0.0782,
               0.0241, 0.1288, 0.8444;
    LMS2RGB << 4.4679, -3.5873, 0.1193,
               -1.2186, 2.3809, -0.1624,
               0.0497, -0.2439, 1.2045;

    // Decorrelating log-LMS <-> lab transforms.
    Matrix3d b, c, b2, c2;
    b << 1.0/sqrt(3.0), 0, 0,
         0, 1.0/sqrt(6.0), 0,
         0, 0, 1.0/sqrt(2.0);
    c << 1, 1, 1,
         1, 1, -2,
         1, -1, 0;
    b2 << sqrt(3.0)/3.0, 0, 0,
          0, sqrt(6.0)/6.0, 0,
          0, 0, sqrt(2.0)/2.0;
    c2 << 1, 1, 1,
          1, 1, -1,
          1, -2, 0;
    Matrix3d LMS2lab = b * c;
    Matrix3d lab2LMS = c2 * b2;

    // Clamp channels to >= 1 so the later log10 stays finite.
    auto unpack_pixel = [](QRgb pix) {
        int r = max(1, qRed(pix)), g = max(1, qGreen(pix)), b = max(1, qBlue(pix));
        return make_tuple(r, g, b);
    };

    // Collect valid pixels into lab space; return (pixels, mean, stdev).
    auto compute_image_stats = [&](const QImage& img, const vector<int>& valid_pixels) {
        const size_t num_pixels = valid_pixels.size();
        const int num_cols = img.width(), num_rows = img.height();
        MatrixXd pixels(3, num_pixels);
        cout << num_cols << 'x' << num_rows << endl;
        for(size_t i=0;i<num_pixels;++i) {
            // Pixel index is row-major: idx = y * num_cols + x.
            int y = valid_pixels[i] / num_cols;
            int x = valid_pixels[i] % num_cols;

            int r, g, b;
            tie(r, g, b) = unpack_pixel(img.pixel(x, y));
            pixels.col(i) = Vector3d(r / 255.0, g / 255.0, b / 255.0);
        }

        MatrixXd pixels_LMS = RGB2LMS * pixels;
        // Work in log space to decorrelate channels.
        for(int i=0;i<3;i++) {
            for(int j=0;j<num_pixels;++j) {
                pixels_LMS(i, j) = log10(pixels_LMS(i, j));
            }
        }
        MatrixXd pixels_lab = LMS2lab * pixels_LMS;

        Vector3d mean = pixels_lab.rowwise().mean();
        // Sample standard deviation per channel.
        Vector3d stdev(0, 0, 0);
        for(int i=0;i<num_pixels;++i) {
            Vector3d diff = pixels_lab.col(i) - mean;
            stdev += Vector3d(diff[0]*diff[0], diff[1]*diff[1], diff[2]*diff[2]);
        }
        stdev /= (num_pixels - 1);
        for(int i=0;i<3;++i) stdev[i] = sqrt(stdev[i]);

        cout << "mean: " << mean << endl;
        cout << "std: " << stdev << endl;

        return make_tuple(pixels_lab, mean, stdev);
    };

    // Compute stats of both images
    MatrixXd lab_s, lab_t;
    Vector3d mean_s, std_s, mean_t, std_t;
    tie(lab_s, mean_s, std_s) = compute_image_stats(source, valid_pixels_s);
    tie(lab_t, mean_t, std_t) = compute_image_stats(target, valid_pixels_t);

    // Do the transfer: mean shift only (variance scaling is commented out).
    MatrixXd res(3, num_pixels_s);
    for(int i=0;i<3;++i) {
        for(int j=0;j<num_pixels_s;++j) {
            //res(i, j) = (lab_s(i, j) - mean_s[i]) * std_t[i] / std_s[i] + mean_t[i];
            res(i, j) = (lab_s(i, j) - mean_s[i]) + mean_t[i];
        }
    }

    // Map back: lab -> log-LMS -> LMS -> RGB.
    MatrixXd LMS_res = lab2LMS * res;
    for(int i=0;i<3;++i) {
        for(int j=0;j<num_pixels_s;++j) {
            LMS_res(i, j) = pow(10, LMS_res(i, j));
        }
    }
    MatrixXd est_im = LMS2RGB * LMS_res;

    // Write the recolored pixels back into the copy of source.
    for(size_t i=0;i<num_pixels_s;++i) {
        int y = valid_pixels_s[i] / num_cols_s;
        int x = valid_pixels_s[i] % num_cols_s;
        result.setPixel(x, y, qRgb(clamp<double>(est_im(0, i) * 255.0, 0., 255.),
                                   clamp<double>(est_im(1, i) * 255.0, 0., 255.),
                                   clamp<double>(est_im(2, i) * 255.0, 0., 255.)));
    }
    return result;
}
}
// Joint multi-image face reconstruction driver.
// Collects (image, landmark-constraint) pairs, runs per-image single-view
// reconstruction with SingleImageReconstructor, optionally selects a
// consistent subset of images, and jointly refines a shared identity.
// Constraint is the landmark-constraint type used by the optimizer.
template <typename Constraint>
class MultiImageReconstructor {
public:
// NOTE(review): enable_progressive_recon is NOT initialized here; it stays
// indeterminate until SetProgressiveReconState() is called — confirm all
// call sites set it before Reconstruct().
MultiImageReconstructor():
enable_selection(true),
enable_failure_detection(true),
direct_multi_recon(false) {}
// Load the multilinear face model for this object and the single-image worker.
void LoadModel(const string& filename) {
model = MultilinearModel(filename);
single_recon.LoadModel(filename);
}
// Load identity/expression priors for both this object and the worker.
void LoadPriors(const string& filename_id, const string& filename_exp) {
prior.load(filename_id, filename_exp);
single_recon.LoadPriors(filename_id, filename_exp);
}
// Contour (silhouette) vertex candidates per view, forwarded to the worker.
void SetContourIndices(const vector<vector<int>>& contour_indices_in) {
contour_indices = contour_indices_in;
single_recon.SetContourIndices(contour_indices_in);
}
// Template mesh copied into every per-image parameter set.
void SetMesh(const BasicMesh& mesh) {
template_mesh = mesh;
}
// Initial landmark-to-vertex index mapping.
void SetIndices(const vector<int>& indices) {
init_indices = indices;
}
// Register one input image together with its landmark constraints.
void AddImagePointsPair(const string& filename, const pair<QImage, vector<Constraint>>& p) {
image_filenames.push_back(filename);
image_points_pairs.push_back(p);
}
// Run the whole multi-image pipeline (defined out of line below).
bool Reconstruct();
// Per-image results; imgidx indexes the order images were added.
const Vector3d& GetRotation(int imgidx) const { return param_sets[imgidx].model.R; }
const Vector3d& GetTranslation(int imgidx) const { return param_sets[imgidx].model.T; }
const VectorXd& GetIdentityWeights(int imgidx) const { return param_sets[imgidx].model.Wid; }
const VectorXd& GetExpressionWeights(int imgidx) const { return param_sets[imgidx].model.Wexp_FACS; }
// NOTE(review): this const method calls model.ApplyWeights(), which mutates
// the model's current tensor — relies on `model` (or ApplyWeights) being
// const-callable; confirm MultilinearModel's declaration.
const Tensor1& GetGeometry(int imgidx) const {
model.ApplyWeights(GetIdentityWeights(imgidx), GetExpressionWeights(imgidx));
return model.GetTM();
}
const CameraParameters GetCameraParameters(int imgidx) const { return param_sets[imgidx].cam; }
const vector<int> GetIndices(int imgidx) const { return param_sets[imgidx].indices; }
// Landmark vertex indices after contour refinement, pulled from constraints.
vector<int> GetUpdatedIndices(int imgidx) const {
vector<int> idxs;
for(int i=0;i<param_sets[imgidx].recon.cons.size();++i) {
idxs.push_back(param_sets[imgidx].recon.cons[i].vidx);
}
return idxs;
}
// Pipeline toggles; see Reconstruct() for how each is used.
void SetSelectionState(bool val) { enable_selection = val; }
void SetFailureDetectionState(bool val) { enable_failure_detection = val; }
void SetDirectMultiRecon(bool val) { direct_multi_recon = val; }
void SetProgressiveReconState(bool val) { enable_progressive_recon = val; }
protected:
// Render the i-th reconstruction over its input image and save it as
// <folder>/<image stem>.png. When scale_output is true the render is
// rescaled so its width is 640 pixels.
void VisualizeReconstructionResult(const fs::path& folder, int i, bool scale_output=true) {
// Visualize the reconstruction results
#if 0
MeshVisualizer w("reconstruction result", param_sets[i].mesh);
w.BindConstraints(image_points_pairs[i].second);
w.BindImage(image_points_pairs[i].first);
w.BindLandmarks(init_indices);
w.BindUpdatedLandmarks(param_sets[i].indices);
w.SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
w.SetCameraParameters(param_sets[i].cam);
w.resize(image_points_pairs[i].first.width(), image_points_pairs[i].first.height());
w.show();
w.paintGL();
w.update();
QImage recon_image = w.grabFrameBuffer();
fs::path image_path = fs::path(image_filenames[i]);
recon_image.save( (folder / fs::path(image_path.stem().string() + ".png")).string().c_str() );
#else
int imgw = image_points_pairs[i].first.width();
int imgh = image_points_pairs[i].first.height();
if(scale_output) {
const int target_size = 640;
double scale = static_cast<double>(target_size) / imgw;
imgw *= scale;
imgh *= scale;
}
// Rendering settings are read from the user's home directory.
const string home_directory = QDir::homePath().toStdString();
cout << "Home dir: " << home_directory << endl;
OffscreenMeshVisualizer visualizer(imgw, imgh);
// Always compute normal
param_sets[i].mesh.ComputeNormals();
visualizer.SetMVPMode(OffscreenMeshVisualizer::CamPerspective);
visualizer.SetRenderMode(OffscreenMeshVisualizer::MeshAndImage);
visualizer.BindMesh(param_sets[i].mesh);
visualizer.BindImage(image_points_pairs[i].first);
visualizer.SetCameraParameters(param_sets[i].cam);
visualizer.SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
visualizer.SetIndexEncoded(false);
visualizer.SetEnableLighting(true);
visualizer.LoadRenderingSettings(home_directory + "/Data/Settings/blendshape_vis_ao.json");
QImage img = visualizer.Render(true);
fs::path image_path = fs::path(image_filenames[i]);
img.save((folder / fs::path(image_path.stem().string() + ".png")).string().c_str());
#endif
}
private:
// Shared multilinear model and its priors.
MultilinearModel model;
MultilinearModelPrior prior;
vector<vector<int>> contour_indices;
vector<int> init_indices;
BasicMesh template_mesh;
// Everything reconstructed for a single input image.
struct ParameterSet {
vector<int> indices;            // landmark vertex indices (possibly refined)
BasicMesh mesh;                 // deformed mesh for this image
CameraParameters cam;           // estimated camera
ModelParameters model;          // pose R/T plus Wid/Wexp weights
ReconstructionParameters<Constraint> recon;   // constraints + image size
OptimizationParameters opt;
ReconstructionStats stats;      // per-image reconstruction error stats
string img_filename;
};
// Input image points pairs
vector<pair<QImage, vector<Constraint>>> image_points_pairs;
vector<string> image_filenames;
// AAM model for consistent set selection
aam::AAMModel aam;
// A set of parameters for each image
vector<ParameterSet> param_sets;
// The worker for single image reconstruction
SingleImageReconstructor<Constraint> single_recon;
bool enable_selection;
bool enable_failure_detection;
bool enable_progressive_recon;
bool direct_multi_recon;
};
namespace {
// Ensure `p` exists as a freshly created, empty directory.
// Any pre-existing file or directory tree at that path is removed first.
void safe_create(const fs::path& p) {
  if (fs::exists(p)) {
    fs::remove_all(p);
  }
  fs::create_directory(p);
}
} // anonymous namespace
// Run the full multi-image reconstruction pipeline.
// Phases per main-loop iteration:
//   1. per-image single-view reconstruction (pose + identity + expression);
//   2. selection of a consistent subset of images using AAM inliers and
//      identity/expression/texture statistics;
//   3. joint identity optimization over the subset (pose/expression refined
//      per image, one shared identity solved with ceres).
// With progressive reconstruction enabled the loop runs 3 times, otherwise once.
// Side effects: creates a "multi_recon" folder next to the first input image
// and writes many intermediate renders/results there, plus identity matrices
// and centroids in the current working directory. Failures to load required
// albedo maps call exit(1). Returns true on completion.
template <typename Constraint>
bool MultiImageReconstructor<Constraint>::Reconstruct() {
cout << "Reconstruction begins..." << endl;
// Required data files live under the user's home directory.
const string home_directory = QDir::homePath().toStdString();
cout << "Home dir: " << home_directory << endl;
// Preparing necessary stuff
const int tex_size = 2048;
const string albedo_index_map_filename(home_directory + "/Data/Multilinear/albedo_index.png");
const string albedo_pixel_map_filename(home_directory + "/Data/Multilinear/albedo_pixel.png");
const string valid_faces_indices_filename(home_directory + "/Data/Multilinear/face_region_indices.txt");
QImage albedo_index_map;
// Get the albedo index map
if(QFile::exists(albedo_index_map_filename.c_str())) {
message("loading index map for albedo.");
albedo_index_map = QImage(albedo_index_map_filename.c_str());
albedo_index_map.save("albedo_index.png");
} else {
cerr << "albedo index map does not exist. Abort." << endl;
exit(1);
}
auto valid_faces_indices_quad = LoadIndices(valid_faces_indices_filename);
// @HACK each quad face is triangulated, so the indices change from i to [2*i, 2*i+1]
vector<int> valid_faces_indices;
for(auto fidx : valid_faces_indices_quad) {
valid_faces_indices.push_back(fidx*2);
valid_faces_indices.push_back(fidx*2+1);
}
// Compute the barycentric coordinates for each pixel
vector<vector<PixelInfo>> albedo_pixel_map(tex_size, vector<PixelInfo>(tex_size));
// Generate pixel map for albedo
// NOTE(review): gen_pixel_map is never read after this — dead flag.
bool gen_pixel_map = false;
QImage pixel_map_image;
// Both albedo maps are required; missing pixel map aborts below.
if(QFile::exists(albedo_pixel_map_filename.c_str())) {
pixel_map_image = QImage(albedo_pixel_map_filename.c_str());
message("generating pixel map for albedo ...");
boost::timer::auto_cpu_timer t("pixel map for albedo generation time = %w seconds.\n");
for(int i=0;i<tex_size;++i) {
for(int j=0;j<tex_size;++j) {
// Face index is RGB-encoded in the index map; (0,0,0) = no face.
QRgb pix = albedo_index_map.pixel(j, i);
unsigned char r = static_cast<unsigned char>(qRed(pix));
unsigned char g = static_cast<unsigned char>(qGreen(pix));
unsigned char b = static_cast<unsigned char>(qBlue(pix));
if(r == 0 && g == 0 && b == 0) continue;
int fidx;
decode_index(r, g, b, fidx);
// Barycentric coordinates are stored as 8-bit channels in [0,255].
QRgb bcoords_pix = pixel_map_image.pixel(j, i);
float x = static_cast<float>(qRed(bcoords_pix)) / 255.0f;
float y = static_cast<float>(qGreen(bcoords_pix)) / 255.0f;
float z = static_cast<float>(qBlue(bcoords_pix)) / 255.0f;
albedo_pixel_map[i][j] = PixelInfo(fidx, glm::vec3(x, y, z));
}
}
message("done.");
} else {
cerr << "albedo pixel map does not exist. Abort." << endl;
exit(1);
}
// Accumulators for the mean texture built from all visible texels.
vector<vector<glm::dvec3>> mean_texture(tex_size, vector<glm::dvec3>(tex_size, glm::dvec3(0, 0, 0)));
cv::Mat mean_texture_mat(tex_size, tex_size, CV_64FC3);
vector<vector<double>> mean_texture_weight(tex_size, vector<double>(tex_size, 0));
QImage mean_texture_image;
// Misc stuff
cout << image_filenames.size() << endl;
// All outputs go under <first image's directory>/multi_recon.
fs::path image_path = fs::path(image_filenames.front()).parent_path();
fs::path result_path = image_path / fs::path("multi_recon");
cout << "creating directory " << result_path.string() << endl;
safe_create(result_path);
cout << "directory created ..." << endl;
// Initialize the parameter sets
param_sets.resize(image_points_pairs.size());
for(size_t i=0;i<param_sets.size();++i) {
auto& params = param_sets[i];
params.img_filename = fs::path(image_filenames[i]).filename().string();
params.indices = init_indices;
params.mesh = template_mesh;
const int image_width = image_points_pairs[i].first.width();
const int image_height = image_points_pairs[i].first.height();
// camera parameters
cout << image_width << "x" << image_height << endl;
params.cam = CameraParameters::DefaultParameters(image_width, image_height);
cout << params.cam.image_size.x << ", " << params.cam.image_size.y << endl;
// model parameters
params.model = ModelParameters::DefaultParameters(prior.Uid, prior.Uexp);
// reconstruction parameters
params.recon.cons = image_points_pairs[i].second;
params.recon.imageWidth = image_width;
params.recon.imageHeight = image_height;
}
const int num_images = image_points_pairs.size();
// Initialize AAM model
// Pack constraints into an Nx2 cv::Mat; y is flipped to image coordinates
// (constraints presumably use a bottom-left origin — confirm).
auto constraints_to_mat = [=](const vector<Constraint>& constraints, int h) {
const int npoints = constraints.size();
cv::Mat m(npoints, 2, CV_64FC1);
for(int j=0;j<npoints;++j) {
m.at<double>(j, 0) = constraints[j].data.x;
m.at<double>(j, 1) = h - constraints[j].data.y;
}
return m;
};
// inliers = images whose landmarks the AAM model considers trustworthy.
vector<int> inliers;
if(enable_failure_detection) {
vector<QImage> images(image_points_pairs.size());
vector<cv::Mat> points(image_points_pairs.size());
// Collect input images and points
for(int i=0;i<image_points_pairs.size();++i) {
images[i] = image_points_pairs[i].first;
points[i] = constraints_to_mat(image_points_pairs[i].second,
image_points_pairs[i].first.height());
}
aam.SetOutputPath(result_path.string());
aam.SetImages(images);
aam.SetPoints(points);
aam.Preprocess();
aam.SetErrorMetric(aam::AAMModel::Hybrid);
// For Debugging
inliers = aam.FindInliers_Iterative();
} else {
// Failure detection disabled: treat every image as an inlier.
inliers.resize(num_images);
iota(inliers.begin(), inliers.end(), 0);
}
VectorXd identity_centroid;
// Main reconstruction loop
// 1. Use single image reconstructor to do per-image reconstruction first
// 2. Select a consistent set of images for joint reconstruction
// 3. Convergence test. If not converged, goto step 1.
const int max_iters_main_loop = enable_progressive_recon?3:1;
int iters_main_loop = 0;
vector<MatrixXd> identity_weights_history;
vector<VectorXd> identity_weights_centroid_history;
vector<int> consistent_set, final_chosen_set;
// Initialize the consistent set to inliers
#if 0
consistent_set.resize(num_images);
iota(consistent_set.begin(), consistent_set.end(), 0);
#else
consistent_set = inliers;
#endif
while(iters_main_loop++ < max_iters_main_loop){
fs::path step_result_path = result_path / fs::path("step" + to_string(iters_main_loop));
safe_create(step_result_path);
// Single image reconstruction step
// Identity prior weight grows slowly with each outer iteration.
OptimizationParameters opt_params = OptimizationParameters::Defaults();
opt_params.w_prior_id = 10 * pow(iters_main_loop, 0.25);
opt_params.w_prior_exp = 10;
opt_params.num_initializations = 1;
opt_params.perturbation_range = 0.01;
opt_params.errorThreshold = 0.01;
fs::path step_single_recon_result_path = step_result_path / fs::path("single_recon");
safe_create(step_single_recon_result_path);
for(int i=0;i<num_images;++i) {
single_recon.SetMesh(param_sets[i].mesh);
single_recon.SetIndices(param_sets[i].indices);
single_recon.SetImageSize(param_sets[i].recon.imageWidth, param_sets[i].recon.imageHeight);
single_recon.SetConstraints(param_sets[i].recon.cons);
single_recon.SetInitialParameters(param_sets[i].model, param_sets[i].cam);
// From the second outer iteration on, pull identities toward the centroid.
if(iters_main_loop > 1) single_recon.SetIdentityPrior(identity_centroid);
// Perform reconstruction
if(!direct_multi_recon) {
boost::timer::auto_cpu_timer t("Single image reconstruction finished in %w seconds.\n");
single_recon.Reconstruct(opt_params);
} else continue;
// Store results
auto tm = single_recon.GetGeometry();
param_sets[i].mesh.UpdateVertices(tm);
param_sets[i].mesh.ComputeNormals();
param_sets[i].model = single_recon.GetModelParameters();
param_sets[i].indices = single_recon.GetIndices();
param_sets[i].cam = single_recon.GetCameraParameters();
if (true) {
VisualizeReconstructionResult(step_single_recon_result_path, i);
fs::path image_path = fs::path(image_filenames[i]);
single_recon.SaveReconstructionResults( (step_single_recon_result_path / fs::path(image_path.stem().string() + ".res")).string());
}
}
// TODO Parameters estimation step, choose a consistent set of images for joint
// optimization
// One identity-weight column per image.
MatrixXd identity_weights(param_sets[0].model.Wid.rows(), num_images);
for(int i=0;i<num_images;++i) {
identity_weights.col(i) = param_sets[i].model.Wid;
}
identity_weights_history.push_back(identity_weights);
// Remove outliers
fs::path selection_result_path = step_result_path / fs::path("selection");
safe_create(selection_result_path);
// Method 0 (statistical consistent set) is currently unreachable.
int selection_method = enable_selection?1:2;
switch(selection_method) {
case 0: {
const double ratios[] = {0.0, 0.4, 0.6, 0.8};
consistent_set = StatsUtils::FindConsistentSet(identity_weights, 0.5, ratios[iters_main_loop] * num_images, &identity_centroid);
assert(consistent_set.size() > 0);
for(auto i : consistent_set) {
VisualizeReconstructionResult(selection_result_path, i);
}
break;
}
case 1: {
// Selection by combining several rankings (error, identity distance,
// expression magnitude, texture difference) intersected with AAM inliers.
double ratios[] = {0.0, 0.4, 0.6, 0.8};
// HACK for testing the system without progressive reconstruction
if(max_iters_main_loop == 1) ratios[1] = 0.8;
// Take the first few as good shape
int k = max(1, static_cast<int>(ratios[iters_main_loop] * num_images));
consistent_set.clear();
// Return the indices of the k smallest-scoring entries.
auto take_first_k = [](vector<pair<int, double>> stats, int k) {
set<int> subset;
std::sort(stats.begin(), stats.end(), [](pair<int,double> a, pair<int, double> b){
return a.second < b.second;
});
for(int i=0;i<k;++i) {
subset.insert(stats[i].first);
}
return subset;
};
// Choose the ones with smallest error, not very useful
// NOTE(review): k here is num_images, so this subset always contains
// every image — the error ranking currently filters nothing.
vector<pair<int, double>> errors(num_images);
for(int i=0;i<num_images;++i) {
errors[i] = make_pair(i, param_sets[i].stats.avg_error);
}
auto subset_error = take_first_k(errors, num_images);
for(auto sx : subset_error) cout << sx << ' '; cout << endl;
// Compute the distance to mean identity weights, choose the close ones
VectorXd mean_identity = StatsUtils::mean(identity_weights, 2);
vector<pair<int, double>> d_identity(num_images);
for(int i=0;i<num_images;++i) {
d_identity[i] = make_pair(i, (identity_weights.col(i) - mean_identity).norm());
}
auto subset_identity = take_first_k(d_identity, k);
for(auto sx : subset_identity) cout << sx << ' '; cout << endl;
// Compute the norm of the expression weights, choose the smaller ones
vector<pair<int, double>> n_expression(num_images);
for(int i=0;i<num_images;++i) {
n_expression[i] = make_pair(i, (param_sets[i].model.Wexp_FACS).norm());
}
auto subset_expression = take_first_k(n_expression, 0.8 * num_images);
for(auto sx : subset_expression) cout << sx << ' '; cout << endl;
#if 1
if(iters_main_loop == 1) {
// Compute the RMSE of color transferred texture
// Collect texture information from each input (image, mesh) pair to obtain mean texture
bool generate_mean_texture = true;
vector<vector<int>> face_indices_maps;
{
for(int img_i=0;img_i<num_images;++img_i) {
const auto& mesh = param_sets[img_i].mesh;
// for each image bundle, render the mesh to FBO with culling to get the visible triangles
OffscreenMeshVisualizer visualizer(image_points_pairs[img_i].first.width(),
image_points_pairs[img_i].first.height());
visualizer.SetMVPMode(OffscreenMeshVisualizer::CamPerspective);
visualizer.SetRenderMode(OffscreenMeshVisualizer::Mesh);
visualizer.BindMesh(param_sets[img_i].mesh);
visualizer.SetCameraParameters(param_sets[img_i].cam);
visualizer.SetMeshRotationTranslation(param_sets[img_i].model.R, param_sets[img_i].model.T);
visualizer.SetIndexEncoded(true);
visualizer.SetEnableLighting(false);
QImage img = visualizer.Render();
//img.save("mesh.png");
// find the visible triangles from the index map
auto triangles_indices_pair = FindTrianglesIndices(img);
set<int> triangles = triangles_indices_pair.first;
face_indices_maps.push_back(triangles_indices_pair.second);
cerr << "triangles = " << triangles.size() << endl;
// get the projection parameters
glm::dmat4 Rmat = glm::eulerAngleYXZ(param_sets[img_i].model.R[0],
param_sets[img_i].model.R[1],
param_sets[img_i].model.R[2]);
glm::dmat4 Tmat = glm::translate(glm::dmat4(1.0),
glm::dvec3(param_sets[img_i].model.T[0],
param_sets[img_i].model.T[1],
param_sets[img_i].model.T[2]));
glm::dmat4 Mview = Tmat * Rmat;
// FOR DEBUGGING
#if 0
// for each visible triangle, compute the coordinates of its 3 corners
QImage img_vertices = img;
vector<vector<glm::dvec3>> triangles_projected;
for(auto tidx : triangles) {
auto face_i = mesh.face(tidx);
auto v0_mesh = mesh.vertex(face_i[0]);
auto v1_mesh = mesh.vertex(face_i[1]);
auto v2_mesh = mesh.vertex(face_i[2]);
glm::dvec3 v0_tri = ProjectPoint(glm::dvec3(v0_mesh[0], v0_mesh[1], v0_mesh[2]), Mview, param_sets[img_i].cam);
glm::dvec3 v1_tri = ProjectPoint(glm::dvec3(v1_mesh[0], v1_mesh[1], v1_mesh[2]), Mview, param_sets[img_i].cam);
glm::dvec3 v2_tri = ProjectPoint(glm::dvec3(v2_mesh[0], v2_mesh[1], v2_mesh[2]), Mview, param_sets[img_i].cam);
triangles_projected.push_back(vector<glm::dvec3>{v0_tri, v1_tri, v2_tri});
img_vertices.setPixel(v0_tri.x, img.height()-1-v0_tri.y, qRgb(255, 255, 255));
img_vertices.setPixel(v1_tri.x, img.height()-1-v1_tri.y, qRgb(255, 255, 255));
img_vertices.setPixel(v2_tri.x, img.height()-1-v2_tri.y, qRgb(255, 255, 255));
}
img_vertices.save("mesh_with_vertices.png");
#endif
#define DEBUG_RECON 1 // for visualizing large scale recon selection related data
message("generating mean texture...");
message("collecting texels...");
if(generate_mean_texture) {
// for each pixel in the texture map, use backward projection to obtain pixel value in the input image
// accumulate the texels in average texel map
for(int ti=0;ti<tex_size;++ti) {
for(int tj=0;tj<tex_size;++tj) {
PixelInfo pix_ij = albedo_pixel_map[ti][tj];
// skip if the triangle is not visible
if(triangles.find(pix_ij.fidx) == triangles.end()) continue;
auto face_i = mesh.face(pix_ij.fidx);
auto v0_mesh = mesh.vertex(face_i[0]);
auto v1_mesh = mesh.vertex(face_i[1]);
auto v2_mesh = mesh.vertex(face_i[2]);
// Barycentric interpolation of the 3D position for this texel.
auto v = v0_mesh * pix_ij.bcoords.x + v1_mesh * pix_ij.bcoords.y + v2_mesh * pix_ij.bcoords.z;
glm::dvec3 v_img = ProjectPoint(glm::dvec3(v[0], v[1], v[2]), Mview, param_sets[img_i].cam);
// take the pixel from the input image through bilinear sampling
glm::dvec3 texel = bilinear_sample(image_points_pairs[img_i].first, v_img.x, image_points_pairs[img_i].first.height()-1-v_img.y);
// Negative texels flag out-of-bounds samples; skip them.
if(texel.r < 0 && texel.g < 0 && texel.b < 0) continue;
mean_texture[ti][tj] += texel;
mean_texture_weight[ti][tj] += 1.0;
}
}
}
}
message("done.");
try {
// [Optional]: render the mesh with texture to verify the texel values
if(generate_mean_texture) {
message("computing mean texture...");
mean_texture_image = QImage(tex_size, tex_size, QImage::Format_ARGB32);
mean_texture_image.fill(0);
// Enforce left/right symmetry by averaging each texel with its mirror.
for(int ti=0; ti<tex_size; ++ti) {
for (int tj=0; tj<(tex_size/2); ++tj) {
double weight_ij = mean_texture_weight[ti][tj];
double weight_ij_s = mean_texture_weight[ti][tex_size-1-tj];
if(weight_ij == 0 && weight_ij_s == 0) {
mean_texture_mat.at<cv::Vec3d>(ti, tj) = cv::Vec3d(0, 0, 0);
continue;
} else {
glm::dvec3 texel = (mean_texture[ti][tj] + mean_texture[ti][tex_size-1-tj]) / (weight_ij + weight_ij_s);
mean_texture[ti][tj] = texel;
mean_texture[ti][tex_size-1-tj] = texel;
mean_texture_image.setPixel(tj, ti, qRgb(texel.r, texel.g, texel.b));
mean_texture_image.setPixel(tex_size-1-tj, ti, qRgb(texel.r, texel.g, texel.b));
mean_texture_mat.at<cv::Vec3d>(ti, tj) = cv::Vec3d(texel.x, texel.y, texel.z);
mean_texture_mat.at<cv::Vec3d>(ti, tex_size-1-tj) = cv::Vec3d(texel.x, texel.y, texel.z);
}
}
}
message("done.");
// Refine at quarter resolution (blur + mean-shift), then upsample back.
cv::resize(mean_texture_mat, mean_texture_mat, cv::Size(), 0.25, 0.25);
//cv::Mat mean_texture_refined_mat = mean_texture_mat.clone();
cv::Mat mean_texture_refined_mat;
{
boost::timer::auto_cpu_timer timer_solve(
"[Joint optimization] Mean texture generation = %w seconds.\n");
#if 1
cv::GaussianBlur(mean_texture_mat, mean_texture_refined_mat, cv::Size(5, 5), 3.0);
mean_texture_refined_mat = StatsUtils::MeanShiftSegmentation(mean_texture_refined_mat, 5.0, 30.0, 0.5);
mean_texture_refined_mat = 0.25 * mean_texture_mat + 0.75 * mean_texture_refined_mat;
/*
mean_texture_refined_mat = StatsUtils::MeanShiftSegmentation(mean_texture_refined_mat, 10.0, 30.0, 0.5);
mean_texture_refined_mat = 0.25 * mean_texture_mat + 0.75 * mean_texture_refined_mat;
mean_texture_refined_mat = StatsUtils::MeanShiftSegmentation(mean_texture_refined_mat, 20.0, 30.0, 0.5);
mean_texture_refined_mat = 0.25 * mean_texture_mat + 0.75 * mean_texture_refined_mat;
*/
cv::resize(mean_texture_refined_mat, mean_texture_refined_mat, cv::Size(), 4.0, 4.0);
#else
cv::Mat mean_texture_refined_mat = mean_texture_mat;
#endif
}
QImage mean_texture_image_refined(tex_size, tex_size, QImage::Format_ARGB32);
for(int ti=0;ti<tex_size;++ti) {
for(int tj=0;tj<tex_size;++tj) {
cv::Vec3d pix = mean_texture_refined_mat.at<cv::Vec3d>(ti, tj);
mean_texture_image_refined.setPixel(tj, ti, qRgb(pix[0], pix[1], pix[2]));
}
}
#if DEBUG_RECON
mean_texture_image.save( (step_result_path / fs::path("mean_texture.png")).string().c_str() );
mean_texture_image_refined.save( (step_result_path / fs::path("mean_texture_refined.png")).string().c_str() );
#endif
mean_texture_image = mean_texture_image_refined;
}
} catch(exception& e) {
cerr << e.what() << endl;
exit(1);
}
}
}
// Per-image texture difference (mean squared RGB distance over visible pixels).
vector<pair<int, double>> d_texture(num_images);
// Rendering the albedo to each image
vector<QImage> albedo_images(num_images);
//#pragma omp parallel for
for(int i=0;i<num_images;++i) {
// for each image bundle, render the mesh to FBO with culling to get the visible triangles
OffscreenMeshVisualizer visualizer(image_points_pairs[i].first.width(),
image_points_pairs[i].first.height());
visualizer.SetMVPMode(OffscreenMeshVisualizer::CamPerspective);
visualizer.SetRenderMode(OffscreenMeshVisualizer::TexturedMesh);
visualizer.BindMesh(param_sets[i].mesh);
visualizer.BindTexture(mean_texture_image);
visualizer.SetCameraParameters(param_sets[i].cam);
visualizer.SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
visualizer.SetFacesToRender(valid_faces_indices);
vector<float> depth_i;
tie(albedo_images[i],depth_i) = visualizer.RenderWithDepth();
auto unpack_pixel = [](QRgb pix) {
return Vector3d(qRed(pix)/255.0, qGreen(pix)/255.0, qBlue(pix)/255.0);
};
int img_w = image_points_pairs[i].first.width();
int img_h = image_points_pairs[i].first.height();
// Pixels with depth < 1 were covered by the mesh; also swap R/B channels
// of the render here (presumably BGR/RGB mismatch — confirm).
vector<int> valid_pixels_map_i;
for(int y=0;y<img_h;++y) {
for(int x=0;x<img_w;++x) {
float dval = depth_i[(img_h-1-y)*img_w+x];
if(dval<1) {
valid_pixels_map_i.push_back(y*img_w + x);
QRgb pix1 = albedo_images[i].pixel(x, y);
albedo_images[i].setPixel(x, y, qRgb(qBlue(pix1), qGreen(pix1), qRed(pix1)));
}
}
}
// Match the albedo render's color statistics to the input photo.
albedo_images[i] = TransferColor(albedo_images[i], image_points_pairs[i].first,
valid_pixels_map_i, valid_pixels_map_i);
#if DEBUG_RECON
albedo_images[i].save( (step_result_path / fs::path("albedo_" + std::to_string(i) + ".png")).string().c_str() );
#endif
// compute texture difference
double diff_i = 0;
int valid_count = 0;
#if DEBUG_RECON
QImage depth_image = albedo_images[i];
depth_image.fill(0);
#endif
for(int y=0;y<img_h;++y) {
for(int x=0;x<img_w;++x) {
float dval = depth_i[(img_h-1-y)*img_w+x];
if(dval<1) {
#if DEBUG_RECON
depth_image.setPixel(x, y, qRgb(dval*255, 0, (1-dval)*255));
#endif
valid_count++;
QRgb pix1 = albedo_images[i].pixel(x, y);
QRgb pix2 = image_points_pairs[i].first.pixel(x, y);
auto p1 = unpack_pixel(pix1);
auto p2 = unpack_pixel(pix2);
double dr = p1[0] - p2[0];
double dg = p1[1] - p2[1];
double db = p1[2] - p2[2];
diff_i += dr*dr+dg*dg+db*db;
}
}
}
// NOTE(review): if no pixel passes the depth test, valid_count is 0 and
// this divides by zero — assumes the mesh always covers some pixels.
d_texture[i] = make_pair(i, diff_i/valid_count);
#if DEBUG_RECON
depth_image.save( (step_result_path / fs::path("depth_" + std::to_string(i) + ".png")).string().c_str() );
#endif
}
auto subset_texture = take_first_k(d_texture, k);
for(auto sx : subset_texture) cout << sx << ' '; cout << endl;
#endif
// Merge them into a consistent set
set<int> final_set(subset_identity.begin(), subset_identity.end());
for(int i=0;i<num_images;++i) {
if(subset_identity.count(i)) {
#if 1
// Use expression as a condition
bool exclude = (subset_expression.count(i) == 0) || (subset_error.count(i) == 0) ||
(subset_texture.count(i) == 0) || (find(inliers.begin(), inliers.end(), i) == inliers.end());
#else
// Use only recon error and texture metric
bool exclude = (subset_error.count(i) == 0) || (subset_texture.count(i) == 0);
#endif
if(exclude) final_set.erase(i);
}
}
// rare case, we go with the mean identity
if(final_set.empty()) {
final_set = take_first_k(d_identity, 1);
}
consistent_set.assign(final_set.begin(), final_set.end());
for(auto i : consistent_set) {
VisualizeReconstructionResult(selection_result_path, i);
}
break;
}
case 2: {
// nothing to do, just use whatever consistent_set is
break;
}
}
// Compute the centroid of the consistent set
identity_centroid = VectorXd::Zero(param_sets[0].model.Wid.rows());
for(auto i : consistent_set) {
cout << i << endl;
identity_centroid += param_sets[i].model.Wid;
}
identity_centroid /= consistent_set.size();
// Update the identity weights for all images
for(auto& param : param_sets) {
param.model.Wid = identity_centroid;
}
// Joint reconstruction step, obtain refined identity weights
int num_iters_joint_optimization = (iters_main_loop == max_iters_main_loop)?4:3;
// Just one-pass optimization
opt_params.num_initializations = 1;
for(int iters_joint_optimization=0;
iters_joint_optimization<num_iters_joint_optimization;
++iters_joint_optimization){
// [Joint reconstruction] step 1: estimate pose and expression weights individually
// In the final iteration, no need to refine the identity weights anymore
if((iters_joint_optimization == num_iters_joint_optimization - 1) && (iters_main_loop == max_iters_main_loop)) {
// Store the final selection
// HACK try to use the inliers as final_chosen_set to produce more point clouds
#if 1
final_chosen_set = consistent_set;
#else
// No good!
final_chosen_set = inliers;
#endif
// Reset consistent_set so all images will be reconstructed in this iteration
consistent_set.resize(num_images);
for(int i=0;i<num_images;++i) consistent_set[i] = i;
}
fs::path joint_pre_result_path = step_result_path / fs::path("joint_recon_" + to_string(iters_joint_optimization) + "_pre");
safe_create(joint_pre_result_path);
for(auto i : consistent_set) {
single_recon.SetMesh(param_sets[i].mesh);
single_recon.SetIndices(param_sets[i].indices);
single_recon.SetImageSize(param_sets[i].recon.imageWidth, param_sets[i].recon.imageHeight);
single_recon.SetConstraints(param_sets[i].recon.cons);
single_recon.SetInitialParameters(param_sets[i].model, param_sets[i].cam);
// Identity is frozen here: optimize pose, expression and focal length only.
single_recon.SetOptimizationMode(
static_cast<typename SingleImageReconstructor<Constraint>::OptimizationMode>(
SingleImageReconstructor<Constraint>::Pose
| SingleImageReconstructor<Constraint>::Expression
| SingleImageReconstructor<Constraint>::FocalLength));
{
boost::timer::auto_cpu_timer t("Single image reconstruction finished in %w seconds.\n");
single_recon.Reconstruct(opt_params);
}
// Store results
auto tm = single_recon.GetGeometry();
param_sets[i].mesh.UpdateVertices(tm);
param_sets[i].model = single_recon.GetModelParameters();
param_sets[i].indices = single_recon.GetIndices();
param_sets[i].cam = single_recon.GetCameraParameters();
if (true) {
// Visualize the reconstruction results
VisualizeReconstructionResult(joint_pre_result_path, i);
}
}
if((iters_joint_optimization == num_iters_joint_optimization - 1) && (iters_main_loop == max_iters_main_loop)) {
// In the final iteration, no need to refine the identity weights anymore
break;
}
// [Joint reconstruction] step 2: estimate identity weights jointly
{
fs::path joint_post_result_path = step_result_path / fs::path("joint_recon_" + to_string(iters_joint_optimization) + "_post");
safe_create(joint_post_result_path);
ceres::Problem problem;
// Shared identity weights, seeded from image 0 (all images hold the
// same centroid at this point).
VectorXd params = param_sets[0].model.Wid;
// Add constraints from each image
for(auto i : consistent_set) {
// Create a projected model first
vector<MultilinearModel> model_projected_i(param_sets[i].indices.size());
for(size_t j=0;j<param_sets[i].indices.size();++j) {
model_projected_i[j] = model.project(vector<int>(1, param_sets[i].indices[j]));
model_projected_i[j].ApplyWeights(param_sets[i].model.Wid, param_sets[i].model.Wexp);
}
// Create relevant matrices
glm::dmat4 Rmat_i = glm::eulerAngleYXZ(param_sets[i].model.R[0], param_sets[i].model.R[1],
param_sets[i].model.R[2]);
glm::dmat4 Tmat_i = glm::translate(glm::dmat4(1.0),
glm::dvec3(param_sets[i].model.T[0],
param_sets[i].model.T[1],
param_sets[i].model.T[2]));
glm::dmat4 Mview_i = Tmat_i * Rmat_i;
// Weight each image inversely by its inter-pupil distance so that
// faces at different scales contribute comparably.
// NOTE(review): "puple" = pupil; indices 28/30/32/34 are presumably
// the eye-corner landmarks — confirm against the landmark layout.
double puple_distance = glm::distance(
0.5 * (param_sets[i].recon.cons[28].data + param_sets[i].recon.cons[30].data),
0.5 * (param_sets[i].recon.cons[32].data + param_sets[i].recon.cons[34].data));
double weight_i = 100.0 / puple_distance;
// Add per-vertex constraints
for(size_t j=0;j<param_sets[i].indices.size();++j) {
ceres::CostFunction * cost_function = new IdentityCostFunction_analytic(
model_projected_i[j], param_sets[i].recon.cons[j], params.size(), Mview_i, Rmat_i,
param_sets[i].cam, weight_i);
problem.AddResidualBlock(cost_function, NULL, params.data());
}
}
// Add prior constraint
ceres::DynamicNumericDiffCostFunction<PriorCostFunction> *prior_cost_function =
new ceres::DynamicNumericDiffCostFunction<PriorCostFunction>(
new PriorCostFunction(prior.Wid_avg, prior.inv_sigma_Wid,
prior.weight_Wid * consistent_set.size()));
prior_cost_function->AddParameterBlock(params.size());
prior_cost_function->SetNumResiduals(1);
problem.AddResidualBlock(prior_cost_function, NULL, params.data());
// Solve it
{
boost::timer::auto_cpu_timer timer_solve(
"[Identity optimization] Problem solve time = %w seconds.\n");
ceres::Solver::Options options;
options.max_num_iterations = 3;
options.minimizer_type = ceres::LINE_SEARCH;
options.line_search_direction_type = ceres::LBFGS;
DEBUG_EXPR(options.minimizer_progress_to_stdout = true;)
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
DEBUG_OUTPUT(summary.FullReport())
}
// Update the identity weights
for(auto& param : param_sets) {
param.model.Wid = params;
// Also update geometry if needed
{
model.ApplyWeights(param.model.Wid, param.model.Wexp);
param.mesh.UpdateVertices(model.GetTM());
param.mesh.ComputeNormals();
}
}
for(auto i : consistent_set) {
if(true) {
VisualizeReconstructionResult(joint_post_result_path, i);
}
}
identity_weights_centroid_history.push_back(params);
}
}
} // end of main reconstruction loop
// Output the reconstructed identity weights
// (written to the current working directory, not result_path)
{
for(size_t i=0;i<identity_weights_history.size();++i) {
ofstream fout("identity_matrix" + std::to_string(i) + ".txt");
fout << identity_weights_history[i];
fout.close();
}
for(size_t i=0;i<identity_weights_centroid_history.size();++i) {
ofstream fout("identity_centroid" + std::to_string(i) + ".txt");
fout << identity_weights_centroid_history[i];
fout.close();
}
}
// Output the chosen subset
{
ofstream fout( (result_path / fs::path("selection.txt")).string() );
for(int i=0;i<final_chosen_set.size();++i) {
// The row indices in the settings file!
const int row_index = final_chosen_set[i];
// Strip the 4-character extension (assumes ".xxx"-style filenames).
const int L = param_sets[row_index].img_filename.size();
fout << param_sets[row_index].img_filename.substr(0, L-4) << endl;
}
fout.close();
}
// Visualize the final reconstruction results
for(int i=0;i<num_images;++i) {
// Visualize the reconstruction results
#if 0
MeshVisualizer* w = new MeshVisualizer("reconstruction result", param_sets[i].mesh);
w->BindConstraints(image_points_pairs[i].second);
w->BindImage(image_points_pairs[i].first);
w->BindLandmarks(init_indices);
w->BindUpdatedLandmarks(param_sets[i].indices);
w->SetMeshRotationTranslation(param_sets[i].model.R, param_sets[i].model.T);
w->SetCameraParameters(param_sets[i].cam);
int show_width = image_points_pairs[i].first.width();
int show_height = image_points_pairs[i].first.height();
double show_ratio = 640.0 / show_height;
w->resize(show_width * show_ratio, 640);
w->show();
w->paintGL();
QImage recon_image = w->grabFrameBuffer();
fs::path image_path = fs::path(image_filenames[i]);
recon_image.save( (result_path / fs::path(image_path.stem().string() + "_recon.png")).string().c_str() );
#else
VisualizeReconstructionResult(result_path, i);
#endif
// Per-image text dump of camera, model and stats next to the input image.
ofstream fout(image_filenames[i] + ".res");
fout << param_sets[i].cam << endl;
fout << param_sets[i].model << endl;
fout << param_sets[i].stats << endl;
fout.close();
}
return true;
}
#endif //MULTILINEARRECONSTRUCTION_MULTIIMAGERECONSTRUCTOR_H
|
final.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <omp.h>
#include <cilk/cilk.h>
#define NMAX 75000000
double* N;
int* lt;
int* gt;
int* eq;
double* local;
/*
 * Allocate the global working arrays for `size` elements.
 *   N        - the data array being sorted
 *   lt/gt/eq - per-element flag arrays used by the parallel partition's
 *              prefix sums
 *   local    - scratch copy of N used while permuting elements
 * Exits with a diagnostic if any allocation fails; the sorters would
 * otherwise dereference NULL later with no indication of the cause.
 */
void init(int size){
    N = malloc(size * sizeof(double));
    lt = malloc(size * sizeof(int));
    gt = malloc(size * sizeof(int));
    local = malloc(size * sizeof(double));
    eq = malloc(size * sizeof(int));
    if (!N || !lt || !gt || !local || !eq) {
        fprintf(stderr, "init: out of memory (size=%d)\n", size);
        exit(EXIT_FAILURE);
    }
}
/*
 * Resize the global working arrays to hold `size` elements.
 * Uses the temp-pointer realloc idiom: assigning realloc's result straight
 * back to the global would leak the original block (and later dereference
 * NULL) if the call failed. On any failure we report and exit.
 */
void reinit(int size){
    double *new_N = realloc(N, size * sizeof(double));
    int *new_lt = realloc(lt, size * sizeof(int));
    int *new_gt = realloc(gt, size * sizeof(int));
    double *new_local = realloc(local, size * sizeof(double));
    int *new_eq = realloc(eq, size * sizeof(int));
    if (!new_N || !new_lt || !new_gt || !new_local || !new_eq) {
        fprintf(stderr, "reinit: out of memory (size=%d)\n", size);
        exit(EXIT_FAILURE);
    }
    N = new_N;
    lt = new_lt;
    gt = new_gt;
    local = new_local;
    eq = new_eq;
}
/* Print the first n entries of the global array N as "[a, b, ...]". */
void printArray(int n){
    int idx;
    printf("[");
    for (idx = 0; idx < n; idx++) {
        if (idx > 0) {
            printf(", ");
        }
        printf("%f", N[idx]);
    }
    printf("]\n");
}
/* Return a pseudo-random double uniformly distributed in [low, high]. */
double drand ( double low, double high )
{
    double unit = (double)rand() / (double)RAND_MAX; /* in [0, 1] */
    return low + unit * ( high - low );
}
/* Populate the first n entries of N with uniform random values in [0, 1000]. */
void fillArrayRandom(int n){
    int idx;
    for (idx = 0; idx < n; idx++) {
        N[idx] = drand(0, 1000);
    }
}
/* Fill N with an already-sorted ascending sequence: 0.5, 1.5, 2.5, ...
   Worst-case input for a non-randomized quicksort pivot choice. */
void fillSorted(int n){
    int idx;
    N[0] = 0.5;
    for (idx = 1; idx < n; idx++) {
        N[idx] = N[idx - 1] + 1;
    }
}
/* Fill N with a strictly descending sequence starting at n + 0.5. */
void fillReverseSorted(int n){
    int idx;
    N[0] = n + 0.5;
    for (idx = 1; idx < n; idx++) {
        N[idx] = N[idx - 1] - 1;
    }
}
/* Fill all n entries of N with one random constant (all-equal-keys input). */
void fillSame(int n){
    double value = drand(0, 1000);
    int idx;
    for (idx = 0; idx < n; idx++) {
        N[idx] = value;
    }
}
/* Fill N with three constant segments: the first quarter, the middle half,
   and the last quarter each get their own random value (drawn in that
   order, matching the original call sequence). */
void fillMostlySame(int n){
    int bounds[4];
    int seg, idx;
    bounds[0] = 0;
    bounds[1] = n / 4;
    bounds[2] = 3 * n / 4;
    bounds[3] = n;
    for (seg = 0; seg < 3; seg++) {
        double value = drand(0, 1000);
        for (idx = bounds[seg]; idx < bounds[seg + 1]; idx++) {
            N[idx] = value;
        }
    }
}
/* Lomuto partition of N[p..r] around the pivot N[r].
   Elements < pivot are moved to the front; the pivot is swapped into its
   final position, whose index is returned. */
int seqPartition(int p, int r){
    double pivot = N[r];
    int store = p;
    int scan;
    double swap;
    for (scan = p; scan < r; scan++) {
        if (N[scan] < pivot) {
            swap = N[store];
            N[store] = N[scan];
            N[scan] = swap;
            store++;
        }
    }
    swap = N[store];
    N[store] = N[r];
    N[r] = swap;
    return store;
}
/*
 * Randomized partition of N[p..r]: swap a uniformly chosen element into
 * the pivot slot N[r], then delegate to seqPartition. Returns the pivot's
 * final index.
 * Fixed: the parameter list used pre-ANSI implicit-int parameters
 * ("int rPartition(p,r)"), which is invalid since C99 and a hard error
 * in C23.
 */
int rPartition(int p, int r){
    int random = (rand() % ((r-p) + 1))+p;
    double temp = N[random];
    N[random] = N[r];
    N[r]=temp;
    return seqPartition(p,r);
}
/* Lomuto partition of N[p..r] that also counts keys equal to the pivot.
   If every element in [p, r) equals the pivot, returns -(r - p) as a
   sentinel meaning "range already sorted, skip recursion"; otherwise
   returns the pivot's final index. */
int partition(int p, int r){
    double pivot = N[r];
    int store = p - 1;
    int equal_count = 0;
    int scan;
    double tmp;
    for (scan = p; scan < r; scan++) {
        if (N[scan] < pivot) {
            store++;
            tmp = N[store];
            N[store] = N[scan];
            N[scan] = tmp;
        } else if (N[scan] == pivot) {
            equal_count++;
        }
    }
    if (equal_count == (r - p)) {
        return -equal_count;
    }
    tmp = N[store + 1];
    N[store + 1] = N[r];
    N[r] = tmp;
    return store + 1;
}
/* Recursive sequential quicksort of N[p..r] with randomized pivots.
   The right half is handled by the loop (manual tail-call elimination);
   the left half recurses. */
void quickSortHelper(int p, int r){
    while (p < r) {
        int q = rPartition(p, r);
        quickSortHelper(p, q - 1);
        p = q + 1;
    }
}
/* Sort N[0..n-1] with the sequential quicksort; returns elapsed wall-clock
   seconds as measured by omp_get_wtime(). */
double sequentialQuickSort(int n){
    double start = omp_get_wtime();
    quickSortHelper(0, n - 1);
    return omp_get_wtime() - start;
}
/* In-place insertion sort of the inclusive range N[p..r]; used as the
   small-range base case of the parallel quicksort. */
void insertionSortHelper(int p, int r){
    int i;
    for (i = p + 1; i <= r; i++) {
        double key = N[i];
        int hole = i;
        /* Shift larger elements right until key's slot is found. */
        while (hole > p && N[hole - 1] > key) {
            N[hole] = N[hole - 1];
            hole--;
        }
        N[hole] = key;
    }
}
/* In-place inclusive prefix sum of arr over the index range [p, r]. */
void prefixSum(int arr[], int p, int r){
    int idx = p;
    while (++idx <= r) {
        arr[idx] += arr[idx - 1];
    }
}
/* Floor of the base-2 logarithm of n (returns 0 for n <= 1). */
int log_2(int n){
    int bits = 0;
    for (n >>= 1; n != 0; n >>= 1) {
        bits++;
    }
    return bits;
}
/* Parallel in-place inclusive prefix sum of the global lt[], gt[] and eq[]
   flag arrays over the index range [p, r], using a two-phase up-sweep /
   down-sweep scheme parallelized with cilk_for.
   Assumes the flag arrays were just populated by parallelPartition for the
   same range. NOTE(review): correctness when (r-p+1) is not a power of two
   relies on the down-sweep's odd-index fix-up — verify against a serial
   reference scan. */
void parallelPrefixSum(int p, int r){
int len = r-p+1;
int shift, j, h;
int k = log_2(len);
/* Up-sweep: on pass h, elements `shift` apart are combined, so indices that
   are multiples of 2^h accumulate partial sums of their preceding block. */
for(h=1; h<k+1;h++){
shift = 1<<h;
cilk_for (j=1; j<(len/shift)+1;j++){
lt[p+j*shift-1]+=lt[p+j*shift-(shift/2)-1];
gt[p+j*shift-1]+=gt[p+j*shift-(shift/2)-1];
eq[p+j*shift-1]+=eq[p+j*shift-(shift/2)-1];
}
}
/* Down-sweep: propagate block totals back down; only odd-numbered blocks
   (j % 2 == 1) still need the preceding block's total added in. */
for(h=k; h>-1;h--){
shift = 1<<h;
cilk_for (j=2; j<(len/shift)+1;j++){
if(j%2==1){
lt[p+j*shift-1]+=lt[p+j*shift-shift-1];
gt[p+j*shift-1]+=gt[p+j*shift-shift-1];
eq[p+j*shift-1]+=eq[p+j*shift-shift-1];
}
}
}
}
/* Three-way parallel partition of N[p..r] around the pivot N[r].
   Each element is flagged as less-than (lt), greater-than (gt) or equal (eq)
   to the pivot; parallel prefix sums over the flags then give each element
   its destination index, and a final parallel pass scatters from the `local`
   snapshot back into N. Returns the pivot's final index, or -(r-p) as a
   sentinel when the whole range is equal to the pivot (already in place). */
int parallelPartition(int p, int r){
double key=N[r];
int i,j;
double temp;
/* Reset flags and snapshot N into `local` so the scatter below can read
   the pre-permutation values. */
cilk_for (i=p; i<r+1; i++){
lt[i]=0;
gt[i]=0;
eq[i]=0;
local[i]=N[i];
}
/* Classify every element (excluding the pivot slot r) against the pivot. */
cilk_for (i = p; i <r; i++){
if(N[i]<key){
lt[i]=1;
gt[i]=0;
}else if(N[i]>key){
lt[i]=0;
gt[i]=1;
}else{
eq[i]=1;
gt[i]=0;
lt[i]=0;
}
}
parallelPrefixSum(p,r);
/* After the scan, lt[r] is the count of elements below the pivot. */
int pivot = lt[r];
if(p+eq[r] == r){
return -1*(r-p);
}
if(p+pivot == r){
return -1*(r-p);
}
N[pivot+p]=key;
/* Scatter: less-than keys before the pivot, equal keys right after it,
   greater-than keys after the equal run. */
cilk_for (i=p; i<r; i++){
if(local[i]<key){
int index = p+lt[i]-1;
N[index]=local[i];
}else if(local[i] > key){
int index = p+pivot+eq[r]+gt[i];
N[index]=local[i];
}else{
int index = p+pivot+eq[i];
N[index]=local[i];
}
}
return pivot+p;
}
/*
 * Randomized partition used by the parallel quicksort: move a uniformly
 * chosen pivot into N[r], then partition sequentially for small ranges or
 * with the parallel prefix-sum partition for large ones (the 0.5*size
 * threshold keeps parallel overhead off small subproblems).
 * Fixed: the parameter list used pre-ANSI implicit-int parameters
 * ("(p,r,size)"), which is invalid since C99 and a hard error in C23.
 */
int randomizedPartition(int p, int r, int size){
    int random = (rand() % ((r-p) + 1))+p;
    double temp = N[random];
    N[random] = N[r];
    N[r]=temp;
    if(r-p < 0.5*size){
        return partition(p,r);
    }else{
        return parallelPartition(p,r);
    }
}
/* Parallel quicksort of N[p..r]. Ranges of at most 50 elements are finished
   with insertion sort. A negative partition result is the all-equal-keys
   sentinel: the range is already in order, so recursion stops. The left
   half is sorted in a spawned task; Cilk's implicit sync at function exit
   joins it before the caller proceeds. */
void psqHelper(int p, int r, int size){
if(p<r){
if(r-p<=50){
insertionSortHelper(p,r);
}else{
int q = randomizedPartition(p,r, size);
if(q<0){
return;
}
cilk_spawn psqHelper(p,q-1, size);
psqHelper(q+1,r, size);
}
}
}
/* Sort N[0..n-1] with the Cilk-parallel quicksort; returns elapsed
   wall-clock seconds.
   NOTE(review): the `#pragma omp master` directives below sit outside any
   OpenMP parallel region, so they have no effect here — the timing calls
   execute unconditionally. Confirm whether they were meant to guard
   anything. */
double parallelQuickSort(int n){
double t1, t2;
#pragma omp master
t1 = omp_get_wtime();
psqHelper(0, n-1, n);
#pragma omp master
t2 = omp_get_wtime();
return t2-t1;
}
/* Verify that N[0..n-1] is sorted in nondecreasing order.
   Returns 0 on success, -1 at the first out-of-order pair. */
int checkArray(int n){
    int idx;
    for (idx = 1; idx < n; idx++) {
        if (N[idx - 1] > N[idx]) {
            return -1;
        }
    }
    return 0;
}
/* (Re)allocate the global arrays for `size` elements (fresh allocation on
 * the first iteration, realloc afterwards) and fill them according to mode:
 * 0 = random, 1 = sorted, 2 = reverse sorted, 3 = mostly-same. */
static void prepareArray(int iteration, int size, int mode){
    if (iteration == 0) {
        init(size);
    } else {
        reinit(size);
    }
    switch (mode) {
    case 0:
        fillArrayRandom(size);
        break;
    case 1:
        fillSorted(size);
        break;
    case 2:
        fillReverseSorted(size);
        break;
    case 3:
        fillMostlySame(size);
        break;
    default:
        fprintf(stderr, "unknown fill mode %d (expected 0..3)\n", mode);
        exit(EXIT_FAILURE);
    }
}

/*
 * Benchmark driver. argv[1] is the Cilk worker count (1 selects the
 * sequential sort), argv[2] the input-distribution mode. Timings are
 * appended to simTimes.csv.
 * Improvements over the original: argc is validated before argv is read,
 * fopen failure is reported instead of crashing on a NULL FILE*, the
 * duplicated init/fill logic is factored into prepareArray(), and main
 * returns an explicit status.
 * NOTE(review): __cilkrts_set_param/__cilkrts_get_nworkers are declared in
 * <cilk/cilk_api.h>, which this file does not include directly — confirm
 * it is pulled in via <cilk/cilk.h> on the target toolchain.
 */
int main(int argc, char * argv[]){
    if (argc < 3) {
        fprintf(stderr, "usage: %s <nworkers> <mode 0..3>\n", argv[0]);
        return EXIT_FAILURE;
    }
    __cilkrts_set_param("nworkers", argv[1]);
    int mode = atoi(argv[2]); /* 0=rand, 1=sorted, 2=rsorted, 3=mostly same */
    FILE* fp = fopen("simTimes.csv","a+");
    if (fp == NULL) {
        perror("simTimes.csv");
        return EXIT_FAILURE;
    }
    int len = 10;
    int n[] = {10, 100, 1000, 10000, 100000, 1000000, 10000000, 50000000, 100000000, 250000000};
    int i;
    srand(getpid());
    if (atoi(argv[1]) == 1) {
        /* Single worker: time the sequential quicksort. */
        for (i = 0; i < len; i++) {
            prepareArray(i, n[i], mode);
            double t = sequentialQuickSort(n[i]);
            fprintf(fp,"%d,1,%d,%f\n",mode,n[i],t);
        }
    }
    else {
        /* Multiple workers: time the parallel quicksort and verify output. */
        for (i = 0; i < len; i++) {
            prepareArray(i, n[i], mode);
            double t = parallelQuickSort(n[i]);
            int numworkers = __cilkrts_get_nworkers();
            printf("%d elements sorted in %f time with %d workers\n", n[i], t, numworkers);
            fprintf(fp,"%d, %d,%d,%f\n",mode,numworkers,n[i],t);
            if (checkArray(n[i]) == -1) {
                printf("SORT FAILED\n");
            } else {
                printf("SUCCESSFUL SORT\n");
            }
        }
    }
    free(N);
    free(lt);
    free(gt);
    free(local);
    free(eq);
    fclose(fp);
    return 0;
}
|
GB_unop__cosh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fc32_fc32)
// op(A') function: GB (_unop_tran__cosh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ccoshf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ccoshf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ccoshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx[p] = ccoshf(Ax[p]) over the anz entries of A, splitting the work
// statically across nthreads OpenMP threads. Cx may alias Ax (pure
// elementwise update). Returns GrB_NO_VALUE when the operator/type pair is
// compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__cosh_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full or sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccoshf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions whose bitmap entry marks them absent
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccoshf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ccoshf(A'): transpose A and apply the unary operator in one pass.
// The actual loop nest lives in the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above; Workspaces/A_slice
// carry the per-thread partitioning computed by the caller.
GrB_Info GB (_unop_tran__cosh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tlr_potrf.h | #ifndef __H2OPUS_TLR_POTRF_H__
#define __H2OPUS_TLR_POTRF_H__
#include <h2opus/core/hara_util.cuh>
#include <h2opus/util/error_approximation.h>
#include <h2opus/util/host_ara.h>
#include <h2opus/core/tlr/tlr_defs.h>
#include <h2opus/core/tlr/tlr_struct.h>
#include <h2opus/core/tlr/tlr_trsm.h>
#include <h2opus/core/tlr/tlr_potrf_workspace.h>
#include <h2opus/core/tlr/tlr_potrf_marshal.h>
#include <h2opus/core/tlr/tlr_potrf_config.h>
#include <h2opus/core/tlr/tlr_batch.h>
#include <h2opus/core/tlr/tlr_potrf_util.h>
#define H2OPUS_TLR_POTRF_USE_MAGMA_FIX
#define H2OPUS_TLR_USE_SCHUR_COMPENSATION
// TODO: Redo workspace to use ara bs instead of max rank
// Update projection to sample using the ara bs instead of the rank
// Merge gemms for small ranks
// GPU kernels for pivot selection
// Graceful return when non-spd matrix is encountered
////////////////////////////////////////////////////////////////////////////
// Main subroutines
////////////////////////////////////////////////////////////////////////////
// Sample the sum of the low rank updates coming from the block rows of a sub-matrix of A
// i.e. compute output_i = sum_{j=col_start:col_end} A_ij * A_kj^T * input_i for each block in row i
// Sample the accumulated low-rank updates for the selected block rows:
// for each row index i, computes output_i (+)= sum_{j=col_start:col_end}
// A_ij * A_kj^T * input_i, where each A_ij = U_ij V_ij^T is stored in TLR
// low-rank form. Several block columns are processed into separate T4
// buffers in parallel, then reduced into output_ptrs at the end (with a
// -1 scale, i.e. the updates are subtracted). The `transpose` flag only
// selects which phase timer is charged and how the marshaling routine
// interprets the blocks.
template <class T, int hw, bool transpose>
void tlr_potrf_sample_lru(TTLR_Matrix<T, hw> &A, int k, int *row_indices, int rows, int col_start, int col_end,
T **input_ptrs, T **output_ptrs, int *samples_batch, int max_samples,
H2OpusTLRPotrfWorkspace<T> &workspace, h2opusComputeStream_t stream)
{
int block_size = A.block_size;
// Figure out how many block columns we can process in parallel
int par_block_cols = workspace.num_sampling_parallel_buffers / rows;
assert(par_block_cols != 0);
par_block_cols = std::min(col_end - col_start + 1, par_block_cols);
if (par_block_cols == 0)
return;
// Workspace aliases for the batched-GEMM argument arrays.
int *bs_batch = workspace.sampling_bs_batch;
int *rank_ij_batch = workspace.sampling_rank_ij_batch;
int *rank_kj_batch = workspace.sampling_rank_kj_batch;
int *max_rank_batch = workspace.sampling_max_rank_batch;
int *samples_i_batch = workspace.sampling_samples_i_batch;
T **Uij_ptrs = workspace.sampling_Uij_ptrs, **Ukj_ptrs = workspace.sampling_Ukj_ptrs;
T **Vij_ptrs = workspace.sampling_Vij_ptrs, **Vkj_ptrs = workspace.sampling_Vkj_ptrs;
T **T1_ptrs = workspace.sampling_buffer_T1, **T2_ptrs = workspace.sampling_buffer_T2;
// Reuse T1 buffers for T3
T **T3_ptrs = workspace.sampling_buffer_T1, **T4_ptrs = workspace.sampling_buffer_T4;
T **input_i_ptrs = workspace.samplinge_input_i_ptrs;
#ifdef H2OPUS_TLR_POTRF_USE_MAGMA_FIX
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Clear);
// if(hw == H2OPUS_HWTYPE_GPU)
{
// Clear out the accumulation buffer T4 since Magma doesn't set C = C * beta when the inner dimension of the
// gemm is zero
size_t T4_entries = block_size * max_samples;
int max_par_blocks = par_block_cols * rows;
fillArray(workspace.base_buffer_T4, T4_entries * max_par_blocks, 0, stream, hw);
// This is only necessary since we have to manually zero out the data due to the above Magma issue
generateArrayOfPointers(workspace.base_buffer_T4, workspace.sampling_buffer_T4, T4_entries, max_par_blocks,
stream, hw);
}
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Clear);
#endif
// Sample multiple block columns at a time when we can
// Each sample goes into its own buffer, and we do a reduction
// on the buffers when we are done
int sampled_block_cols = col_start;
T T4_beta = 0;
TLR_Potrf_Phase_Times<hw>::startPhase(transpose ? TLR_Potrf_Phase_Types::Projection
: TLR_Potrf_Phase_Types::Sample);
while (sampled_block_cols <= col_end)
{
// Sample the low rank updates:
// T4 += A_ij * A_kj^T * R = U_ij * V_ij^T V_kj (U_kj^T * R) = U_ij * V_ij^T V_kj * T1
// = U_ij * V_ij^T * T2 = U_ij * T3
int block_columns = std::min(par_block_cols, col_end - sampled_block_cols + 1);
int sample_block_count = block_columns * rows;
// Marshal the blocks that need to be sampled in this set of block columns
tlr_potrf_marshal_lru_sample_range<T, hw, transpose>(
vec_ptr(A.block_U_ptrs), vec_ptr(A.block_V_ptrs), vec_ptr(A.block_ranks), k, A.n_block, row_indices, rows,
sampled_block_cols, block_columns, Uij_ptrs, Vij_ptrs, Ukj_ptrs, Vkj_ptrs, input_ptrs, input_i_ptrs,
samples_batch, samples_i_batch, rank_ij_batch, rank_kj_batch, NULL, 0, NULL, sample_block_count, stream);
int max_rank_kj = getMaxElement(rank_kj_batch, sample_block_count, stream, hw);
int max_rank_ij = getMaxElement(rank_ij_batch, sample_block_count, stream, hw);
// Now that we marshaled the low rank pointers, execute the needed gemms
// T1 = U_kj^T * R
check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_Trans, H2Opus_NoTrans, rank_kj_batch,
samples_i_batch, bs_batch, max_rank_kj, max_samples, block_size,
(T)1, (const T **)Ukj_ptrs, bs_batch, (const T **)input_i_ptrs,
bs_batch, 0, T1_ptrs, max_rank_batch, sample_block_count));
// T2 = V_kj * T1
check_kblas_error((H2OpusBatched<T, hw>::gemm)(
stream, H2Opus_NoTrans, H2Opus_NoTrans, bs_batch, samples_i_batch, rank_kj_batch, block_size, max_samples,
max_rank_kj, (T)1, (const T **)Vkj_ptrs, bs_batch, (const T **)T1_ptrs, max_rank_batch, 0, T2_ptrs,
bs_batch, sample_block_count));
// T3 = V_ij^T * T2
check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_Trans, H2Opus_NoTrans, rank_ij_batch,
samples_i_batch, bs_batch, max_rank_ij, max_samples, block_size,
(T)1, (const T **)Vij_ptrs, bs_batch, (const T **)T2_ptrs,
bs_batch, 0, T3_ptrs, max_rank_batch, sample_block_count));
// T4 += U_ij * T3
check_kblas_error((H2OpusBatched<T, hw>::gemm)(
stream, H2Opus_NoTrans, H2Opus_NoTrans, bs_batch, samples_i_batch, rank_ij_batch, block_size, max_samples,
max_rank_ij, (T)1, (const T **)Uij_ptrs, bs_batch, (const T **)T3_ptrs, max_rank_batch, T4_beta, T4_ptrs,
bs_batch, sample_block_count));
sampled_block_cols += block_columns;
// After the first pass, accumulate into T4 instead of overwriting it.
T4_beta = 1;
}
TLR_Potrf_Phase_Times<hw>::endPhase(transpose ? TLR_Potrf_Phase_Types::Projection : TLR_Potrf_Phase_Types::Sample);
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Reduction);
// Do a reduction on the parallel buffers
if (par_block_cols > 0)
{
TLR_Batch<T, hw>::reduceMatrixBuffers(0, output_ptrs, bs_batch, bs_batch, samples_batch, -1, T4_ptrs, bs_batch,
par_block_cols, block_size, max_samples, rows, stream);
}
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Reduction);
}
// Sample block column k of A for the selected block rows: for each row
// index i, computes output_i += A_ik * input_i (or the transposed sample
// when `transpose` is set — the flag is forwarded to the marshaling routine
// and selects the Projection timer instead of Sample). Each TLR block
// A_ik = U_ik V_ik^T is applied as two batched gemms via the intermediate
// T1 = V_ik^T * R, with beta = 1 so results accumulate into output_ptrs.
template <class T, int hw, bool transpose>
void tlr_potrf_sample_col(TTLR_Matrix<T, hw> &A, int k, int *row_indices, int rows, T **input_ptrs, T **output_ptrs,
int *samples_batch, int max_samples, H2OpusTLRPotrfWorkspace<T> &workspace,
h2opusComputeStream_t stream)
{
TLR_Potrf_Phase_Times<hw>::startPhase(transpose ? TLR_Potrf_Phase_Types::Projection
: TLR_Potrf_Phase_Types::Sample);
int block_size = A.block_size;
// Re-use the lru sampling arrays
int *bs_batch = workspace.sampling_bs_batch;
int *max_rank_batch = workspace.sampling_max_rank_batch;
T **T1_ptrs = workspace.sampling_buffer_T1;
T **Uik_ptrs = workspace.sampling_Uij_ptrs;
T **Vik_ptrs = workspace.sampling_Vij_ptrs;
int *rank_ik_batch = workspace.sampling_rank_ij_batch;
tlr_potrf_marshal_col_sample_range<T, hw, transpose>(vec_ptr(A.block_U_ptrs), vec_ptr(A.block_V_ptrs),
vec_ptr(A.block_ranks), k, A.n_block, row_indices, rows,
Uik_ptrs, Vik_ptrs, rank_ik_batch, stream);
int max_col_rank = getMaxElement(rank_ik_batch, rows, stream, hw);
// T1 = V_ik^T * R
check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_Trans, H2Opus_NoTrans, rank_ik_batch, samples_batch,
bs_batch, max_col_rank, max_samples, block_size, (T)1,
(const T **)Vik_ptrs, bs_batch, (const T **)input_ptrs, bs_batch, 0,
T1_ptrs, max_rank_batch, rows));
// output += U_ik * T1
check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_NoTrans, H2Opus_NoTrans, bs_batch, samples_batch,
rank_ik_batch, block_size, max_samples, max_col_rank, (T)1,
(const T **)Uik_ptrs, bs_batch, (const T **)T1_ptrs, max_rank_batch,
(T)1, output_ptrs, bs_batch, rows));
TLR_Potrf_Phase_Times<hw>::endPhase(transpose ? TLR_Potrf_Phase_Types::Projection : TLR_Potrf_Phase_Types::Sample);
}
// Generate an approximation of the low rank updated block column k
// in left looking cholesky
// Randomized (ARA-style) recompression of block column k after applying the
// accumulated low-rank updates, in two stages:
//   1) build an orthogonal basis Q_i for each updated block A(i,k) by
//      sampling with random Gaussian vectors until the residual vectors are
//      small (rank detection with tolerance eps);
//   2) project to get B_i = (updated A_ik)^T Q_i, then reallocate the block
//      column to the detected ranks and store Q_i / B_i as the new U / V
//      factors.
template <class T, int hw>
void tlr_potrf_update_block_column(TTLR_Matrix<T, hw> &A, T eps, int k, H2OpusTLRPotrfWorkspace<T> &workspace,
h2opusComputeStream_t stream, h2opusHandle_t h2opus_handle)
{
// r = number of consecutive small sample vectors required to declare
// convergence in the ARA rank detection.
const int r = 10;
int max_rank = A.max_rank;
int n_block = A.n_block;
int rows = n_block - k - 1;
int block_size = A.block_size;
int sample_bs = workspace.sample_bs;
// Nothing to do for the first column (no prior updates) or an empty column.
if (k == 0 || rows == 0)
return;
int row_index_start = k + 1;
T **input_ptrs = workspace.sampling_input, **sub_Q_ptrs = workspace.sampling_input_mod;
T **Q_ptrs = workspace.sampling_output, **sub_Y_ptrs = workspace.sampling_output_mod;
int *samples_batch = workspace.sampling_samples_batch;
int *bs_batch = workspace.sampling_bs_batch;
int *detected_ranks = workspace.detected_ranks;
int *small_vectors = workspace.small_vectors;
// Temporary storage for matrix Z, used in the basis orthogonalization
// Z is a (current_rank x samples) matrix. we can re-use the T1 array which
// has enough space for max_rank x samples.
T **Z_ptrs = workspace.sampling_buffer_T1;
int *ldz_batch = workspace.sampling_max_rank_batch;
#ifdef H2OPUS_TLR_USE_CHOLESKY_QR
double *R_diag = workspace.orthog_workspace.R_diag;
typedef double R_prec;
#else
T *R_diag = workspace.orthog_workspace.hh_R_diag;
typedef T R_prec;
#endif
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Clear);
////////////////////////////////////////////////////////////////////////
// Generate an approximation basis Q_i for the blocks of the column
////////////////////////////////////////////////////////////////////////
// Clear the output
// size_t output_entries = block_size * max_rank * rows;
// fillArray(workspace.base_buffer_output, output_entries, 0, stream, hw);
fillArray(detected_ranks, rows, 0, stream, hw);
fillArray(small_vectors, rows, 0, stream, hw);
fillArray(samples_batch, rows, sample_bs, stream, hw);
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Clear);
int block_rows = (rows < 20 ? rows : rows / 2);
int *row_indices = workspace.sampling_row_indices;
int *subset_ranks = workspace.sub_detected_ranks;
// Set the subset row index array
generateSequence(row_indices, rows, row_index_start, stream, hw);
// Sort the row indices by their original rank in descending order
int *original_ranks = vec_ptr(A.block_ranks) + row_index_start + k * n_block;
copyArray(original_ranks, subset_ranks, rows, stream, hw);
sortByKey(subset_ranks, row_indices, rows, true, stream, hw);
// Clear the ranks
fillArray(subset_ranks, rows, 0, stream, hw);
int converged_blocks = 0;
int *converged_blocks_ptr = workspace.converged_blocks;
fillArray(converged_blocks_ptr, 1, 0, stream, hw);
// Keep sampling until every block row of the column has converged.
while (converged_blocks < rows)
{
int n_rows = std::min(block_rows, rows - converged_blocks);
// Set the Q and Y pointers based on the current ranks and the selected block rows
tlr_potrf_set_sample_basis_ptrs<T, hw>(sub_Q_ptrs, sub_Y_ptrs, Q_ptrs, bs_batch, row_indices, subset_ranks,
row_index_start, n_rows, stream);
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::RandGen);
// Generate the random gaussian input vectors - each one of size block_size x samples
check_kblas_error((H2OpusBatched<T, hw>::rand)(stream, h2opus_handle, bs_batch, samples_batch, block_size,
sample_bs, input_ptrs, bs_batch, n_rows));
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::RandGen);
// Sample the sum of the low rank updates in each block row in the sub-matrix A(k+1:end, 0:k-1)
tlr_potrf_sample_lru<T, hw, false>(A, k, row_indices, n_rows, 0, k - 1, input_ptrs, sub_Y_ptrs, samples_batch,
sample_bs, workspace, stream);
// Sample the current block column k and subtract the result from the previously accumulated samples
// i.e A_ik * R_i - sum_{j=0:k-1} A_ij * A_kj^T * R_i
tlr_potrf_sample_col<T, hw, false>(A, k, row_indices, n_rows, input_ptrs, sub_Y_ptrs, samples_batch, sample_bs,
workspace, stream);
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Orthog);
int current_max_rank = getMaxElement(subset_ranks, n_rows, stream, hw);
// Generate orthogonal basis from the samples and check for convergence
tlr_ara_gen_basis<T, hw>(sub_Q_ptrs, bs_batch, bs_batch, subset_ranks, block_size, current_max_rank, sub_Y_ptrs,
bs_batch, samples_batch, sample_bs, Z_ptrs, ldz_batch, n_rows,
workspace.orthog_workspace, stream);
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Orthog);
// Count the number of vectors that have a small magnitude
// also updates the rank, max diagonal and advances the Y_batch pointers
hara_util_svec_count_batch<T, R_prec, hw>(samples_batch, subset_ranks, small_vectors, R_diag, r, (R_prec)eps,
sample_bs, sub_Y_ptrs, sub_Q_ptrs, bs_batch, n_rows, stream);
// See how many samples we should take next, see how many operations converged and update the pointer arrays if
// necessary
converged_blocks =
tlr_ara_check_converged<hw>(samples_batch, row_indices, subset_ranks, small_vectors, converged_blocks_ptr,
detected_ranks, sample_bs, max_rank, r, n_rows, row_index_start, rows, stream);
}
////////////////////////////////////////////////////////////////////////
// Form B_i = A_ik^T Q_i
////////////////////////////////////////////////////////////////////////
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Clear);
// Clear the input
// size_t input_entries = block_size * A.max_rank * rows;
// fillArray(workspace.base_buffer_input, input_entries, 0, stream, hw);
// Reset the row index array
generateSequence(row_indices, rows, k + 1, stream, hw);
// Keep track of how many samples were taken
int *current_samples = small_vectors;
fillArray(current_samples, rows, 0, stream, hw);
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Clear);
T **sub_B_ptrs = sub_Y_ptrs;
int projected_blocks = 0;
block_rows = rows;
// Projection pass: sample with the converged bases Q_i (transpose = true)
// to form the new V factors, processing the rows in batches.
while (projected_blocks < rows)
{
int n_rows = std::min(block_rows, rows - projected_blocks);
// Figure out how many samples we need per block
// tlr_determine_projection_samples<T, hw>(
// samples_batch, sample_bs, current_samples, detected_ranks,
// Q_ptrs, sub_Q_ptrs, bs_batch, input_ptrs, sub_B_ptrs, bs_batch,
// n_rows, stream
// );
sub_Q_ptrs = Q_ptrs + projected_blocks;
sub_B_ptrs = input_ptrs + projected_blocks;
samples_batch = detected_ranks + projected_blocks;
int max_samples = getMaxElement(samples_batch, n_rows, stream, hw);
// Sample the sum of the low rank updates in each block row in the sub-matrix A(k+1:end, 0:k-1)
tlr_potrf_sample_lru<T, hw, true>(A, k, row_indices, n_rows, 0, k - 1, sub_Q_ptrs, sub_B_ptrs, samples_batch,
max_samples, workspace, stream);
// Sample the current block column k and subtract the result from the previously accumulated samples
// i.e A_ik * R_i - sum_{j=0:k-1} A_ij * A_kj^T * R_i
tlr_potrf_sample_col<T, hw, true>(A, k, row_indices, n_rows, sub_Q_ptrs, sub_B_ptrs, samples_batch, max_samples,
workspace, stream);
row_indices += n_rows;
projected_blocks += n_rows;
}
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Realloc);
// Now reallocate the block column to the new ranks
A.allocateBlockColumn(k, detected_ranks, stream);
//...and copy the new low rank data
T **col_block_U_ptrs = vec_ptr(A.block_U_ptrs) + k + 1 + k * n_block;
T **col_block_V_ptrs = vec_ptr(A.block_V_ptrs) + k + 1 + k * n_block;
int new_max_rank = getMaxElement(detected_ranks, rows, stream, hw);
check_kblas_error((H2OpusBatched<T, hw>::copyBlock)(stream, bs_batch, detected_ranks, block_size, new_max_rank,
col_block_U_ptrs, bs_batch, Q_ptrs, bs_batch, rows));
check_kblas_error((H2OpusBatched<T, hw>::copyBlock)(stream, bs_batch, detected_ranks, block_size, new_max_rank,
col_block_V_ptrs, bs_batch, input_ptrs, bs_batch, rows));
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Realloc);
}
// Apply the accumulated dense Schur update to a diagonal block with
// compression-error compensation: the (block_size x block_size) `update`
// matrix is compressed to U_update * V_update^T by randomized ARA at
// tolerance eps, D is updated, and on the CPU path the row sums of the
// compression residual are added back to D's diagonal so the factorization
// stays positive definite despite the truncation. The large commented-out
// regions are earlier SVD/QR-based variants kept for reference.
template <class T, int hw>
void tlr_potrf_schurcompensation_update(T *D, T *update, int block_size, T eps, T *U_update, T *S_update, T *V_update,
H2OpusTLRPotrfWorkspace<T> &workspace, int *rank_ptr,
h2opusComputeStream_t stream, h2opusHandle_t h2opus_handle)
{
TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::SchurCompensation);
// ara_bs = sampling block size, ara_r = consecutive small vectors needed
// for ARA convergence.
const int ara_bs = 32, ara_r = 10;
// thrust::host_vector<T> host_diff(block_size * block_size);
// copyVector(host_diff, update, block_size * block_size, hw);
//
// // Akk = U * S * V
// std::vector<T> svd_ws(block_size, 0);
// int ret = lapack_gesvd(block_size, block_size, update, block_size, S_update, U_update, block_size, V_update,
// block_size, &svd_ws[0]); if(ret != 0)
// printf("SVD failed: %d\n", ret);
//
// int rank = 0;
// while(rank < block_size && S_update[rank] > eps)
// rank++;
// printf("Dense update compressed to %d\n", rank);
//
// // U = U * S
// #pragma omp parallel for
// for(int j = 0; j < rank; j++)
// for(int i = 0; i < block_size; i++)
// U_update[i + j * block_size] = U_update[i + j * block_size] * S_update[j];
//
// // blas_gemm<T, H2OPUS_HWTYPE_CPU>(
// // stream, H2Opus_NoTrans, H2Opus_NoTrans, block_size, block_size, rank,
// // (T)(-1), U_update, block_size, V_update, block_size, (T)1, vec_ptr(host_diff), block_size
// // );
// //
// // T diff = 0;
// // for(int i = 0; i < block_size * block_size; i++)
// // diff += host_diff[i] * host_diff[i];
// // printf("Approximation error = %e\n", sqrt(diff));
//
// // Akk = Akk - U * V
// blas_gemm<T, hw>(
// stream, H2Opus_NoTrans, H2Opus_NoTrans, block_size, block_size, rank,
// (T)(-1), U_update, block_size, V_update, block_size, (T)1, D, block_size
// );
int rank;
// eps = eps * 50 * sqrt(2 / M_PI);
if (hw == H2OPUS_HWTYPE_CPU)
{
// copyArray(update, U_update, block_size * block_size, stream, hw);
//
// check_kblas_error((H2OpusBatched<T, hw>::geqp2)(
// stream, block_size, block_size, U_update, block_size, block_size * block_size,
// S_update, block_size, rank_ptr, eps, 1
// ) );
//
// rank = thrust_get_value<hw>(rank_ptr);
//
// check_kblas_error((H2OpusBatched<T, hw>::orgqr)(
// stream, block_size, rank, U_update, block_size, block_size * block_size,
// S_update, block_size, 1
// ) );
// // printf("Dense update compressed to %d\n", rank);
//
// blas_gemm<T, hw>(
// stream, H2Opus_Trans, H2Opus_NoTrans, rank, block_size, block_size,
// (T)1, U_update, block_size, update, block_size, (T)0, V_update, block_size
// );
// // Akk = Akk - U * V
// blas_gemm<T, hw>(
// stream, H2Opus_NoTrans, H2Opus_NoTrans, block_size, block_size, rank,
// (T)(-1), U_update, block_size, V_update, block_size, (T)1, D, block_size
// );
// Grab workspace
T *ara_Z = workspace.svd_ws;
T *ara_R_diag = ara_Z + block_size * block_size;
rank = h2opus_ara(stream, block_size, block_size, update, block_size, U_update, block_size, V_update,
block_size, ara_Z, block_size, ara_R_diag, eps, ara_bs, ara_r, block_size, h2opus_handle);
// Akk = Akk - U * V'
// blas_gemm<T, hw>(
// stream, H2Opus_NoTrans, H2Opus_Trans, block_size, block_size, rank,
// (T)(-1), U_update, block_size, V_update, block_size, (T)1, D, block_size
// );
// Compute D = D - update + diag(Sc_diag)
// Sc_diag = row_sum(abs(S_diff)) (S_diff is symmetric)
// S_diff = update - U * V'
// D = D - update
blas_axpy<T, hw>(stream, block_size * block_size, (T)(-1), update, 1, D, 1);
// If ARA truncated (rank < block_size), compensate the diagonal for the
// dropped part of the update.
if (rank != block_size)
{
T *S_diff = workspace.svd_ws;
h2opus_fbl_lacpy(H2OpusFBLAll, block_size, block_size, update, block_size, ara_Z, block_size);
blas_gemm<T, hw>(stream, H2Opus_NoTrans, H2Opus_Trans, block_size, block_size, rank, (T)(-1), U_update,
block_size, V_update, block_size, (T)1, S_diff, block_size);
for (int i = 0; i < block_size; i++)
{
// Add the absolute row sum of the residual to D's diagonal entry.
T scdiag = 0;
for (int j = 0; j < block_size; j++)
scdiag += fabs(S_diff[j + i * block_size]);
D[i + i * block_size] += scdiag;
}
}
}
else
{
#ifdef H2OPUS_USE_GPU
// GPU path: batched ARA compression, then subtract U * V' from D directly
// (no Schur compensation term on this path).
int *bs_batch = workspace.sampling_bs_batch;
kblas_ara_batch(stream->getKblasHandle(), bs_batch, bs_batch, workspace.ptr_svd, bs_batch,
workspace.ptr_svd + 1, bs_batch, workspace.ptr_svd + 2, bs_batch, rank_ptr, eps, block_size,
block_size, block_size, ara_bs, ara_r, h2opus_handle->getKblasRandState(), 0, 1);
rank = thrust_get_value<hw>(rank_ptr);
// printf("Dense update compressed to %d\n", rank);
// Akk = Akk - U * V'
blas_gemm<T, hw>(stream, H2Opus_NoTrans, H2Opus_Trans, block_size, block_size, rank, (T)(-1), U_update,
block_size, V_update, block_size, (T)1, D, block_size);
#endif
}
TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::SchurCompensation);
}
// Accumulate the dense (Schur complement) updates of all low rank blocks in
// block row k into diagonal block k. For each block j < k with A_kj = U V^T,
// the contribution (U V^T)(U V^T)^T is computed into a parallel workspace
// buffer; the buffers are reduced into A_kk at the end, either directly or
// through the Schur compensation path when sc_eps != 0.
template <class T, int hw>
void tlr_potrf_diagonal_update(TTLR_Matrix<T, hw> &A, T sc_eps, int k, H2OpusTLRPotrfWorkspace<T> &workspace,
                               h2opusComputeStream_t stream, h2opusHandle_t h2opus_handle)
{
    // We do multiple updates in parallel to workspace buffers, then accumulate
    // them at the end into the diagonal block
    int block_size = A.block_size;
    int par_dense_updates = workspace.num_dense_parallel_buffers;
    int applied_dense_update = 0;
    int *bs_batch = workspace.dense_bs_batch, *rank_batch = workspace.dense_rank_batch;
    int *max_rank_batch = workspace.dense_max_rank_batch;
    T **ptr_G = workspace.dense_buffer_G, **ptr_T = workspace.dense_buffer_T;
    T **ptr_D = workspace.dense_buffer_D, **ptr_U = workspace.dense_U_ptrs;
    T **ptr_V = workspace.dense_V_ptrs;

    TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::DenseUpdate);

#ifdef H2OPUS_TLR_POTRF_USE_MAGMA_FIX
    // Clear out the dense buffers since Magma doesn't set C = C * beta when the inner dimension of the gemm is zero
    if (hw == H2OPUS_HWTYPE_GPU)
    {
        size_t num_dense_buffer_entries = std::min(par_dense_updates, k) * block_size * block_size;
        fillArray(workspace.base_dense_buffer_D, num_dense_buffer_entries, 0, stream, hw);
    }
#endif
    // First segment overwrites the D buffers (beta = 0); later segments accumulate
    T dense_beta = 0;

    // Marshal the entire block row operations
    tlr_potrf_marshal_dense_updates<T, hw>(vec_ptr(A.block_U_ptrs), vec_ptr(A.block_V_ptrs), k, A.n_block,
                                           vec_ptr(A.block_ranks), ptr_U, ptr_V, rank_batch, NULL, 0, NULL, k, stream);

    // Find out the max rank for the segmented batches of non-uniform gemms
    int *local_seg_maxes = workspace.dense_segment_maxes;
    int num_segments = getSegmentedMaxElements(rank_batch, k, par_dense_updates, local_seg_maxes, stream, hw);
    int *seg_maxes = workspace.dense_segment_maxes_host;
    copyVectorToHost<int>(seg_maxes, local_seg_maxes, num_segments, hw);

    // Process the k updates in segments of at most par_dense_updates at a time
    for (int seg = 0; seg < num_segments; seg++)
    {
        int num_updates = std::min(par_dense_updates, k - applied_dense_update);
        int max_subset_rank = seg_maxes[seg];

        // Execute the batches
        // For each low rank update of the form R_i R_i^T where R_i = U_i V_i^T is a low rank block
        // We must first compute G_i = V_i^T V_i, then compute T_i = U_i G_i and finally D_i = T_i U_i^T
        check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_Trans, H2Opus_NoTrans, rank_batch, rank_batch,
                                                       bs_batch, max_subset_rank, max_subset_rank, block_size, (T)1,
                                                       (const T **)ptr_V, bs_batch, (const T **)ptr_V, bs_batch, 0,
                                                       ptr_G, max_rank_batch, num_updates));
        check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_NoTrans, H2Opus_NoTrans, bs_batch, rank_batch,
                                                       rank_batch, block_size, max_subset_rank, max_subset_rank, (T)1,
                                                       (const T **)ptr_U, bs_batch, (const T **)ptr_G, max_rank_batch,
                                                       0, ptr_T, bs_batch, num_updates));
        check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_NoTrans, H2Opus_Trans, bs_batch, bs_batch,
                                                       rank_batch, block_size, block_size, max_subset_rank, (T)1,
                                                       (const T **)ptr_T, bs_batch, (const T **)ptr_U, bs_batch,
                                                       dense_beta, ptr_D, bs_batch, num_updates));

        // Advance the marshaled pointer arrays to the next segment;
        // subsequent segments accumulate into the same D buffers
        applied_dense_update += num_updates;
        rank_batch += num_updates;
        ptr_U += num_updates;
        ptr_V += num_updates;
        dense_beta = 1;
    }
    TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::DenseUpdate);

    // Do a reduction on all buffers to update the diagonal block
    int updates_to_reduce = std::min(k, par_dense_updates);
    if (updates_to_reduce <= 0)
        return;

#ifdef H2OPUS_TLR_USE_SCHUR_COMPENSATION
    if (sc_eps == 0)
    {
#endif
        // Plain path: A_kk -= sum of the parallel D buffers
        TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Reduction);
        TLR_Batch<T, hw>::reduceMatrixBuffers(1, vec_ptr(A.diagonal_block_ptrs) + k, bs_batch, bs_batch, bs_batch, -1,
                                              ptr_D, bs_batch, updates_to_reduce, block_size, block_size, 1, stream);
        TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Reduction);
#ifdef H2OPUS_TLR_USE_SCHUR_COMPENSATION
    }
    else
    {
        // Schur compensation path: reduce the update buffers into a separate
        // accumulator, then apply it to A_kk via the compensation routine
        T *D_k = workspace.svd_A, *U = workspace.svd_U, *V = workspace.svd_V, *S = workspace.svd_S;
        T **ptr_A = workspace.ptr_svd;

        TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Reduction);
        TLR_Batch<T, hw>::reduceMatrixBuffers(0, ptr_A, bs_batch, bs_batch, bs_batch, 1, ptr_D, bs_batch,
                                              updates_to_reduce, block_size, block_size, 1, stream);
        TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Reduction);

        T *A_kk = A.getDiagonalBlockHostPtr(k);
        tlr_potrf_schurcompensation_update<T, hw>(A_kk, D_k, block_size, sc_eps, U, S, V, workspace,
                                                  workspace.converged_blocks, stream, h2opus_handle);
    }
#endif
}
// Factorize diagonal block k of the TLR matrix in place with a dense
// (unpivoted) Cholesky factorization. On failure the process is terminated
// with a non-zero exit status, since a non-SPD diagonal block is fatal for
// the factorization. The disabled #else path instead attempts to repair an
// indefinite block (tlr_util_make_spd) and retries before giving up.
template <class T, int hw>
void tlr_potrf_diagonal_factorize(TTLR_Matrix<T, hw> &A, T eps, int k, H2OpusTLRPotrfWorkspace<T> &workspace,
                                  h2opusComputeStream_t stream)
{
    TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Potrf);
    int block_size = A.block_size;
    T *A_kk = A.getDiagonalBlockHostPtr(k);
    int info;

    // Factorize the diagonal block using dense cholesky
#if 1
    lapack_potrf<T, hw>(stream, block_size, A_kk, block_size, &info);
    if (info != 0)
    {
        // %p requires a void* argument; passing T* directly is undefined behavior
        printf(
            "TLR matrix was semi-definite. Cholesky failed at column %d of block column %d (A = %p lda = %d n = %d)\n",
            info, k, (void *)A_kk, block_size, block_size);
        // Terminate with a failure status so callers/scripts can detect the error
        // (the original exit(0) reported success)
        exit(1);
    }
#else
    info = 1;

    // Make a temporary copy of the diagonal block
    std::vector<T> spd_work(block_size * block_size * 4);
    T *A_kk_temp = vec_ptr(spd_work);
    T *LD_temp = A_kk_temp + block_size * block_size;
    T *D_temp = LD_temp + block_size * block_size;
    T *A_temp2 = D_temp + block_size * block_size;
    int *ipiv = workspace.dense_ipiv;

    copyVector(A_kk_temp, hw, A_kk, hw, block_size * block_size);
    lapack_potrf<T, hw>(stream, block_size, A_kk_temp, block_size, &info);
    if (info != 0)
    {
        printf("TLR matrix was semi-definite. Cholesky failed at column %d of block column %d.\n", info, k);

        // Reset A_kk_temp
        copyVector(A_kk_temp, hw, A_kk, hw, block_size * block_size);
        copyVector(A_temp2, hw, A_kk, hw, block_size * block_size);
        // char buffer[256]; sprintf(buffer, "block_%d.bin", k);
        // save_matrix(A_temp2, block_size * block_size, buffer);

        // Make it positive definite
        tlr_util_make_spd<T, hw>(block_size, A_kk_temp, block_size, eps * 10, D_temp, block_size, LD_temp, block_size,
                                 A_kk, block_size, ipiv, stream);

        // Report how far the repaired block is from the original (abs and rel)
        T diff = 0, norm_A = 0;
        for (int i = 0; i < block_size; i++)
        {
            for (int j = 0; j < block_size; j++)
            {
                norm_A += A_temp2[i + j * block_size] * A_temp2[i + j * block_size];
                diff += (A_temp2[i + j * block_size] - A_kk[i + j * block_size]) *
                        (A_temp2[i + j * block_size] - A_kk[i + j * block_size]);
            }
        }
        printf("SPD Diff = %e | %e\n", sqrt(diff), sqrt(diff / norm_A));

        // Try again - if it fails then all is lost and we abandon ship
        lapack_potrf<T, hw>(stream, block_size, A_kk, block_size, &info);
        if (info != 0)
            printf("Failed to make the diagonal block %d SPD. Failed at column %d\n", k, info);
    }
    else
    {
        // Copy the successfully factorized block back to the TLR matrix
        copyVector(A_kk, hw, A_kk_temp, hw, block_size * block_size);
    }
#endif
    TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Potrf);
}
// Apply the triangular solve of the panel below diagonal block k. Each
// subdiagonal block is stored as U V^T, so only the V factors need updating;
// this is done with one batched trsm sharing the factor L_kk. The #if 0
// region is a disabled recompression of the solved panel via batched SVD.
template <class T, int hw>
void tlr_potrf_panel_trsm(TTLR_Matrix<T, hw> &A, T eps, int k, H2OpusTLRPotrfWorkspace<T> &workspace,
                          h2opusComputeStream_t stream)
{
    // For j = k+1:end, we have to update each L_jk = A_jk * L_kk^-T = U_jk V_jk^T * L_kk^-T
    // So all we really need to do is update V_jk^T = V_jk^T * L_kk^-T (or V_jk = L_kk^-1 * V_jk)
    // which translate to a triangular solve L_kk X = V_jk
    int batchCount = A.n_block - k - 1;
    int block_size = A.block_size;
    int n_block = A.n_block;

    TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::Trsm);
    if (batchCount > 0)
    {
        T *L_kk = A.getDiagonalBlockHostPtr(k);
        // Flat index of the first subdiagonal block of block column k
        int subdiag_index_start = k * n_block + k + 1;
        int *m_batch = workspace.trsm_m, *n_batch = vec_ptr(A.block_ranks) + subdiag_index_start;
        T **A_ptrs = workspace.trsm_A_ptrs, **B_ptrs = vec_ptr(A.block_V_ptrs) + subdiag_index_start;

        // All solves in the batch share the same triangular factor and size
        fillArray(m_batch, batchCount, block_size, stream, hw);
        fillArray(A_ptrs, batchCount, L_kk, stream, hw);

        int max_n = getMaxElement(n_batch, batchCount, stream, hw);

        check_kblas_error((H2OpusBatched<T, hw>::trsm)(stream, H2Opus_Left, H2Opus_Lower, H2Opus_NoTrans,
                                                       H2Opus_NonUnit, m_batch, n_batch, block_size, max_n, 1, A_ptrs,
                                                       m_batch, B_ptrs, m_batch, batchCount));
#if 0
        // Compress the result using SVD:
        // T2 and T4 from the workspace have enough memory to hold the singular vectors
        T **svd_U_ptrs = workspace.sampling_buffer_T2, **svd_V_ptrs = workspace.sampling_buffer_T4;
        // T1 has enough memory for the singular values
        T **svd_S_ptrs = workspace.sampling_buffer_T1;

        // Copy the old U and V to the sampling input and output buffers of the workspace
        T **old_tlr_U_ptrs = workspace.sampling_input, **old_tlr_V_ptrs = workspace.sampling_output;
        T **col_block_U_ptrs = vec_ptr(A.block_U_ptrs) + subdiag_index_start;
        T **col_block_V_ptrs = B_ptrs;
        int *bs_batch = workspace.sampling_bs_batch;
        int *detected_ranks = workspace.detected_ranks;
        int *current_ranks = n_batch;

        check_kblas_error((H2OpusBatched<T, hw>::copyBlock)(
            stream, bs_batch, current_ranks, block_size, max_n, old_tlr_U_ptrs,
            bs_batch, col_block_U_ptrs, bs_batch, batchCount));
        check_kblas_error((H2OpusBatched<T, hw>::copyBlock)(
            stream, bs_batch, current_ranks, block_size, max_n, old_tlr_V_ptrs,
            bs_batch, col_block_V_ptrs, bs_batch, batchCount));

        // Set V = U_svd S_svd V_svd (V_svd contains the transpose of the right singular vectors)
        check_kblas_error((H2OpusBatched<T, hw>::gesvd)(
            stream, bs_batch, current_ranks, block_size, max_n, old_tlr_V_ptrs, bs_batch,
            svd_U_ptrs, bs_batch, svd_S_ptrs, svd_V_ptrs, bs_batch, eps, detected_ranks,
            batchCount
        ) );

        // printDenseMatrix(old_tlr_V_ptrs[0], bs_batch[0], bs_batch[0], current_ranks[0], 14, "A");
        // printDenseMatrix(svd_U_ptrs[0], bs_batch[0], bs_batch[0], current_ranks[0], 14, "U");
        // printDenseMatrix(svd_S_ptrs[0], bs_batch[0], current_ranks[0], 1, 14, "S");
        // printDenseMatrix(svd_V_ptrs[0], bs_batch[0], current_ranks[0], current_ranks[0], 14, "V");

        // for(int i = 0; i < batchCount; i++)
        //     printf("(%d %d): %d -> %d\n", i + k + 1, k, current_ranks[i], detected_ranks[i]);

        // Update the block column ranks
        A.allocateBlockColumn(k, detected_ranks, stream);
        int max_detected_rank = getMaxElement(detected_ranks, batchCount, stream, hw);

        // Update the pointers and set the new U and V
        col_block_U_ptrs = vec_ptr(A.block_U_ptrs) + subdiag_index_start;
        col_block_V_ptrs = vec_ptr(A.block_V_ptrs) + subdiag_index_start;

        // The new U = U_old * V_svd^T
        check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_NoTrans, H2Opus_Trans, bs_batch, detected_ranks,
                                                       current_ranks, block_size, max_detected_rank, max_n, (T)1,
                                                       (const T **)old_tlr_U_ptrs, bs_batch, (const T **)svd_V_ptrs, bs_batch,
                                                       0, col_block_U_ptrs, bs_batch, batchCount));

        // The new V = U_svd * S_svd
        check_kblas_error((H2OpusBatched<T, hw>::diagRightMult)(stream, bs_batch, detected_ranks, block_size, max_detected_rank,
                                                                svd_U_ptrs, bs_batch, (const T **)svd_S_ptrs,
                                                                col_block_V_ptrs, bs_batch, batchCount));
#endif
    }
    TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::Trsm);
}
// Symmetrically swap block row/column k with block row/column pivot_index of
// the lower-triangular TLR matrix (pivoted Cholesky), keeping the pivot
// vector and the per-column dense update buffers consistent with the swap.
// No-op when piv is NULL or the pivot is already in place.
template <class T, int hw>
void tlr_pstrf_pivot_swap(TTLR_Matrix<T, hw> &A, int k, int pivot_index, int *piv,
                          H2OpusTLRPotrfWorkspace<T> &workspace, h2opusComputeStream_t stream)
{
    int n_block = A.n_block;

    if (piv && pivot_index != k)
    {
        // Swap the columns and rows k and pivot_index using only the lower triangular indices
        A.swapDiagonalBlocks(k, pivot_index, stream);
        A.swapBlocks(k, k, 0, n_block, pivot_index, 0, n_block, false, stream);
        if (pivot_index < n_block - 1)
            A.swapBlocks(n_block - pivot_index - 1, pivot_index + 1, k, 1, pivot_index + 1, pivot_index, 1, false,
                         stream);
        A.swapBlocks(pivot_index - k - 1, k + 1, k, 1, pivot_index, k + 1, n_block, true, stream);
        // The block at the intersection of the swapped row and column changes
        // sides of the diagonal, so it is transposed in place
        A.transposeBlock(pivot_index, k, stream);

        // Swap the pivot index entries
        swap_vectors(1, piv + k, 1, piv + pivot_index, 1, hw, stream);

        // Swap the dense update pointers
        swap_vectors(1, workspace.dense_buffer_D + k, 1, workspace.dense_buffer_D + pivot_index, 1, hw, stream);
    }
}
// Accumulate the contribution of block column k into the dense update buffers
// D_j (j > k) used by the pivoted Cholesky: for each subdiagonal block
// A_jk = U V^T, D_j += U (V^T V) U^T. The third gemm uses beta = 1 so
// contributions accumulate across calls.
template <class T, int hw>
void tlr_pstrf_update_dense_updates(TTLR_Matrix<T, hw> &A, int k, H2OpusTLRPotrfWorkspace<T> &workspace,
                                    h2opusComputeStream_t stream)
{
    int num_updates = A.n_block - k - 1;
    if (num_updates <= 0)
        return;

    TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::DenseUpdate);
    int block_size = A.block_size;

    // Marshall the pointers to the update
    int *bs_batch = workspace.dense_bs_batch;
    int *max_rank_batch = workspace.dense_max_rank_batch;
    T **ptr_G = workspace.dense_buffer_G, **ptr_T = workspace.dense_buffer_T;
    T **ptr_D = workspace.dense_buffer_D + k + 1;
    T **ptr_U = vec_ptr(A.block_U_ptrs) + k * A.n_block + k + 1;
    T **ptr_V = vec_ptr(A.block_V_ptrs) + k * A.n_block + k + 1;
    int *rank_batch = vec_ptr(A.block_ranks) + k * A.n_block + k + 1;

    int max_subset_rank = getMaxElement(rank_batch, num_updates, stream, hw);

    // Execute the batches
    // For each low rank update of the form R_i R_i^T where R_i = U_i V_i^T is a low rank block
    // We must first compute G_i = V_i^T V_i, then compute T_i = U_i G_i and finally D_i = T_i U_i^T
    check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_Trans, H2Opus_NoTrans, rank_batch, rank_batch,
                                                   bs_batch, max_subset_rank, max_subset_rank, block_size, (T)1,
                                                   (const T **)ptr_V, bs_batch, (const T **)ptr_V, bs_batch, 0, ptr_G,
                                                   max_rank_batch, num_updates));
    check_kblas_error((H2OpusBatched<T, hw>::gemm)(stream, H2Opus_NoTrans, H2Opus_NoTrans, bs_batch, rank_batch,
                                                   rank_batch, block_size, max_subset_rank, max_subset_rank, (T)1,
                                                   (const T **)ptr_U, bs_batch, (const T **)ptr_G, max_rank_batch, 0,
                                                   ptr_T, bs_batch, num_updates));
    check_kblas_error((H2OpusBatched<T, hw>::gemm)(
        stream, H2Opus_NoTrans, H2Opus_Trans, bs_batch, bs_batch, rank_batch, block_size, block_size, max_subset_rank,
        (T)1, (const T **)ptr_T, bs_batch, (const T **)ptr_U, bs_batch, 1, ptr_D, bs_batch, num_updates));
    TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::DenseUpdate);
}
// Select the pivot for step k of the pivoted Cholesky: returns the index i in
// [k, n_block) whose diagonal block, with its pending dense update
// subtracted, has the largest Frobenius norm.
template <class T, int hw>
int tlr_pstrf_select_pivot(TTLR_Matrix<T, hw> &A, int k, H2OpusTLRPotrfWorkspace<T> &workspace,
                           h2opusComputeStream_t stream, h2opusHandle_t h2opus_handle)
{
    int block_size = A.block_size;
    int pivot_index = k;

    // Running maximum norm and the index of the block that attains it
    struct Compare
    {
        T val;
        int index;
    };
    Compare max;
    max.val = 0;
    max.index = pivot_index;

    TLR_Potrf_Phase_Times<hw>::startPhase(TLR_Potrf_Phase_Types::PivotSelection);
/* Not used so far */
#if 0
#if defined(_OPENMP) && _OPENMP >= 201811
#pragma omp declare reduction(maximum : struct Compare : omp_out = omp_in.val > omp_out.val ? omp_in : omp_out)
#endif
#pragma omp parallel for reduction(maximum : max)
#endif
    for (int i = k; i < A.n_block; i++)
    {
        T *diagonal_block = A.getDiagonalBlockHostPtr(i);
        T *update_block = workspace.dense_buffer_D[i];
        T block_norm = 0;
// Frobenius norm of (diagonal block - pending dense update)
#pragma omp parallel for reduction(+ : block_norm) schedule(runtime) num_threads(std::min(stream->getMaxOmpThreads(), block_size * block_size))
        for (int b = 0; b < block_size * block_size; b++)
            block_norm += (diagonal_block[b] - update_block[b]) * (diagonal_block[b] - update_block[b]);
        block_norm = sqrt(block_norm);

        if (block_norm > max.val)
        {
            max.val = block_norm;
            max.index = i;
        }
    }
    pivot_index = max.index;
    TLR_Potrf_Phase_Times<hw>::endPhase(TLR_Potrf_Phase_Types::PivotSelection);

    return pivot_index;
}
// Apply the accumulated dense update to diagonal block k of the pivoted
// Cholesky. Without Schur compensation this is a plain subtraction
// (A_kk -= D_k); with it, the update is routed through the compensation
// routine using the SVD workspace buffers.
template <class T, int hw>
void tlr_pstrf_update_diagonal(TTLR_Matrix<T, hw> &A, int k, T eps, H2OpusTLRPotrfWorkspace<T> &workspace,
                               h2opusComputeStream_t stream, h2opusHandle_t h2opus_handle)
{
    int block_size = A.block_size;
#ifndef H2OPUS_TLR_USE_SCHUR_COMPENSATION
    int *bs_batch = workspace.dense_bs_batch;
    // A_kk -= D_k (single-buffer reduction with alpha = -1)
    TLR_Batch<T, hw>::reduceMatrixBuffers(1, vec_ptr(A.diagonal_block_ptrs) + k, bs_batch, bs_batch, bs_batch, -1,
                                          workspace.dense_buffer_D + k, bs_batch, 1, block_size, block_size, 1, stream);
#else
    T *D = A.getDiagonalBlockHostPtr(k);
    T *update = workspace.dense_buffer_D[k];
    T *U = workspace.svd_U, *V = workspace.svd_V, *S = workspace.svd_S;

    tlr_potrf_schurcompensation_update<T, hw>(D, update, block_size, eps, U, S, V, workspace,
                                              workspace.converged_blocks, stream, h2opus_handle);
#endif
}
// Left-looking block Cholesky factorization of a symmetric TLR matrix A,
// overwriting A with its lower-triangular TLR factor. If piv is non-NULL a
// pivoted factorization is performed (CPU only; requires tile allocation) and
// the permutation is returned in piv. Per-phase timings and (on the CPU path)
// operation counts are printed on completion.
template <class T, int hw>
void tlr_potrf(TTLR_Matrix<T, hw> &A, TLR_Potrf_Config<T, hw> &config, int *piv, h2opusHandle_t h2opus_handle)
{
    assert(A.type == H2OpusTLR_Symmetric);

    // grab configuration
    T eps = config.eps;
    T sc_eps = config.sc_eps;
    int ndpb = config.ndpb;
    int nspb = config.nspb;
    int sample_bs = config.sample_bs;

    // Grow the handle's workspace if the factorization needs more than is allocated
    H2OpusWorkspaceState ws_needed = tlr_potrf_workspace<T, hw>(A, false, ndpb, nspb, sample_bs, piv, h2opus_handle);
    H2OpusWorkspaceState ws_allocated = h2opus_handle->getWorkspaceState();
    if (ws_allocated < ws_needed)
        h2opus_handle->setWorkspaceState(ws_needed);

    H2OpusTLRPotrfWorkspace<T> workspace;
    tlr_potrf_get_workspace(A, workspace, false, ndpb, nspb, sample_bs, piv, h2opus_handle);

    // No point trying to stream the dense part since many kernels have sync points
    h2opusComputeStream_t main_stream = h2opus_handle->getMainStream();
    h2opusComputeStream_t low_priority_stream = main_stream; // h2opus_handle->getLowPriorityStream();

    // Change the type of A to lower triangular so that rank changes will affect the lower
    // triangular half of A
    A.type = H2OpusTLR_LowerTriangular;

    if (piv)
    {
        assert(A.alloc == H2OpusTLRTile);
        // Start from the identity permutation
        generateSequence(piv, A.n_block, 0, main_stream, hw);
        // No GPU implementation yet. still need pivot selection sorted out
        assert(hw == H2OPUS_HWTYPE_CPU);
    }

    // If you're averaging runtimes, make sure to copy the original matrix
    // TTLR_Matrix<T, H2OPUS_HWTYPE_CPU> original_A(A, main_stream);
    TLR_Potrf_Phase_Times<hw>::init();
    float elapsed_time = 0;
    const int nruns = 1;
    PerformanceCounter::clearCounters();

    for (int run = 0; run < nruns; run++)
    {
        // A.copy(original_A, main_stream);
        Timer<hw> timer;
        timer.init();
        timer.start();

        H2OpusEvents &events = h2opus_handle->getEvents();
        events.allocateEvents<hw>(H2OpusDenseEvent, 1);

        // Make sure the low priority stream waits for any work previously
        // submitted on the main stream before launching any work
        events.recordEvent<hw>(H2OpusDenseEvent, 0, main_stream);
        events.streamWaitEvent<hw>(H2OpusDenseEvent, low_priority_stream, 0);

        // Main left looking block cholesky loop
        for (int k = 0; k < A.n_block; k++)
        {
            // Update diagonal block and factorize
            if (piv)
            {
                // Fold the previous column's contribution into the pending dense updates
                if (k > 0)
                    tlr_pstrf_update_dense_updates<T, hw>(A, k - 1, workspace, low_priority_stream);

                // Determine a pivot block and swap it with the current block
                if (k < A.n_block - 1)
                {
                    int pivot_index = tlr_pstrf_select_pivot(A, k, workspace, low_priority_stream, h2opus_handle);
                    // int pivot_index = k + rand() % (A.n_block - k);
                    tlr_pstrf_pivot_swap<T, hw>(A, k, pivot_index, piv, workspace, low_priority_stream);
                }

                // Subtract the update dense update to the diagonal block
                if (k > 0)
                    tlr_pstrf_update_diagonal<T, hw>(A, k, sc_eps, workspace, low_priority_stream, h2opus_handle);
            }
            else
                tlr_potrf_diagonal_update<T, hw>(A, sc_eps, k, workspace, low_priority_stream, h2opus_handle);

            tlr_potrf_diagonal_factorize<T, hw>(A, eps, k, workspace, low_priority_stream);
            events.recordEvent<hw>(H2OpusDenseEvent, 0, low_priority_stream);

            // Next, update column k using ARA on the main stream
            tlr_potrf_update_block_column<T, hw>(A, eps, k, workspace, main_stream, h2opus_handle);

            // Make sure the dense updates are done
            events.streamWaitEvent<hw>(H2OpusDenseEvent, main_stream, 0);

            // Finally do a triangular solve on the updated blocks
            tlr_potrf_panel_trsm<T, hw>(A, eps, k, workspace, main_stream);
        }
        elapsed_time += timer.stop();
    }

    // printf("TLR cholesky done in %.4f\n", elapsed_time / nruns);
    // Print the per-phase timing breakdown averaged over the runs
    const char *phase_names[] = {"Reduction", "Sample", "Projection", "Realloc", "Orthog", "Trsm",
                                 "Potrf", "Clear", "RandGen", "SchurComp", "DenseUpdate", "PivotSelect"};
    double total_time = 0;
    for (int i = 0; i < TLR_Potrf_Phase_Types::TLR_Potrf_TotalPhases; i++)
        printf("%12s ", phase_names[i]);
    printf("%12s %12s\n", "Misc", "Total");
    for (int i = 0; i < TLR_Potrf_Phase_Types::TLR_Potrf_TotalPhases; i++)
    {
        total_time += TLR_Potrf_Phase_Times<hw>::phase_times[i] / nruns;
        printf("%12.4f ", TLR_Potrf_Phase_Times<hw>::phase_times[i] / nruns);
    }
    printf("%12.4f %12.4f\n", elapsed_time / nruns - total_time, elapsed_time / nruns);

    // Clear all the uppertriangular halves of the diagonal dense blocks
    H2OpusBatched<T, hw>::setUpperZero(main_stream, A.block_size, A.block_size, vec_ptr(A.diagonal_block_ptrs),
                                       A.block_size, A.n_block);

    // Clear the low rank pointers and ranks for the upper block triangle matrix
    TLR_ClearUpperTriangle<T> ptr_clear(vec_ptr(A.block_U_ptrs), vec_ptr(A.block_V_ptrs), vec_ptr(A.block_ranks),
                                        A.n_block);
    int num_ptrs = A.n_block * A.n_block;
    thrust::for_each(ThrustRuntime<hw>::get(main_stream), thrust::counting_iterator<int>(0),
                     thrust::counting_iterator<int>(num_ptrs), ptr_clear);

    if (hw == H2OPUS_HWTYPE_CPU)
    {
        // Report the operation counts accumulated by the CPU kernels
        double gemm_gops = PerformanceCounter::getOpCount(PerformanceCounter::GEMM);
        double potrf_gops = PerformanceCounter::getOpCount(PerformanceCounter::POTRF);
        double trsm_gops = PerformanceCounter::getOpCount(PerformanceCounter::TRSM);
        PerformanceCounter::clearCounters();
        printf("Total GOPS = %.3f (%.3f gemm, %.3f potrf, %.3f trsm)\n", gemm_gops + potrf_gops + trsm_gops, gemm_gops,
               potrf_gops, trsm_gops);
    }
}
// Convenience overload: unpivoted TLR Cholesky (no pivot vector).
template <class T, int hw>
void tlr_potrf(TTLR_Matrix<T, hw> &A, TLR_Potrf_Config<T, hw> &config, h2opusHandle_t h2opus_handle)
{
    tlr_potrf<T, hw>(A, config, NULL, h2opus_handle);
}
// Set X = (LL^t)^{-1} * X
// Triangular solve with the TLR Cholesky factor L: two tlr_trsm calls apply
// L^-1 then L^-T to the nrhs right-hand sides stored in x (leading
// dimension ldx), solving L L^T X = X in place.
template <class T, int hw> void tlr_potrs(TTLR_Matrix<T, hw> &L, int nrhs, T *x, int ldx, h2opusHandle_t h2opus_handle)
{
    assert(L.type == H2OpusTLR_LowerTriangular);

    // set X = L^{-1} * X
    tlr_trsm<T, hw>(H2Opus_Left, H2Opus_NoTrans, 1, L, nrhs, x, ldx, h2opus_handle);
    // set X = L^{-T} * X
    tlr_trsm<T, hw>(H2Opus_Left, H2Opus_Trans, 1, L, nrhs, x, ldx, h2opus_handle);
}
#endif
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
/* Per-transform bookkeeping shared by the Fourier transform routines. */
typedef struct _FourierInfo
{
  ChannelType
    channel;            /* image channel the transform operates on */

  MagickBooleanType
    modulus;            /* if true: magnitude/phase output, else real/imaginary */

  size_t
    width,              /* transform width in pixels */
    height;             /* transform height in pixels */

  ssize_t
    center;             /* center offset; presumably the zero-frequency (DC)
                           index -- confirm against the transform routines */
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ComplexImages() applies the complex operator op to an image sequence. The
  first two images supply the real and imaginary planes of operand A; if the
  sequence holds at least four images, the next two supply operand B,
  otherwise B aliases A. Two new images (real and imaginary result planes)
  are returned, or NULL on error.
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* At least two images (real and imaginary planes) are required */
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  /* Create the two result images (real plane, then imaginary plane) */
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  /* A is the first image pair; B is the second pair if present, else B = A */
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(images,complex_images,images->rows,1L)
#endif
  for (y=0; y < (ssize_t) images->rows; y++)
  {
    register const PixelPacket
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    register PixelPacket
      *magick_restrict Ci,
      *magick_restrict Cr;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const PixelPacket *) NULL) ||
        (Ai == (const PixelPacket *) NULL) ||
        (Br == (const PixelPacket *) NULL) ||
        (Bi == (const PixelPacket *) NULL) ||
        (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) images->columns; x++)
    {
      switch (op)
      {
        case AddComplexOperator:
        {
          /* C = A + B, channel by channel */
          Cr->red=Ar->red+Br->red;
          Ci->red=Ai->red+Bi->red;
          Cr->green=Ar->green+Br->green;
          Ci->green=Ai->green+Bi->green;
          Cr->blue=Ar->blue+Br->blue;
          Ci->blue=Ai->blue+Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity+Br->opacity;
              Ci->opacity=Ai->opacity+Bi->opacity;
            }
          break;
        }
        case ConjugateComplexOperator:
        default:
        {
          /* NOTE(review): the imaginary part is negated from Bi, not Ai; for
             a two-image sequence Bi aliases Ai so the result is the same, but
             confirm the intent for four-image sequences. */
          Cr->red=Ar->red;
          Ci->red=(-Bi->red);
          Cr->green=Ar->green;
          Ci->green=(-Bi->green);
          Cr->blue=Ar->blue;
          Ci->blue=(-Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity;
              Ci->opacity=(-Bi->opacity);
            }
          break;
        }
        case DivideComplexOperator:
        {
          /* C = A / B = A * conj(B) / (|B|^2 + snr) */
          double
            gamma;

          gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr);
          Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red);
          Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red);
          gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+
            snr);
          Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green);
          Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green);
          gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr);
          Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue);
          Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity*
                Bi->opacity+snr);
              Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity*
                Bi->opacity);
              Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity*
                Bi->opacity);
            }
          break;
        }
        case MagnitudePhaseComplexOperator:
        {
          /* Cr = |A|, Ci = arg(A) mapped from [-pi,pi] to [0,1] */
          Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red);
          Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5;
          Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green);
          Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5;
          Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue);
          Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity);
              Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5;
            }
          break;
        }
        case MultiplyComplexOperator:
        {
          /* C = A * B, scaled by QuantumScale to stay in quantum range */
          Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red);
          Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red);
          Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green);
          Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green);
          Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue);
          Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue);
          if (images->matte != MagickFalse)
            {
              Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity*
                Bi->opacity);
              Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity*
                Bi->opacity);
            }
          break;
        }
        case RealImaginaryComplexOperator:
        {
          /* Inverse of MagnitudePhase: Ar is magnitude, Ai is phase in [0,1] */
          Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
          Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
          Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
          Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
          Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
          Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
              Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
            }
          break;
        }
        case SubtractComplexOperator:
        {
          /* C = A - B, channel by channel */
          Cr->red=Ar->red-Br->red;
          Ci->red=Ai->red-Bi->red;
          Cr->green=Ar->green-Br->green;
          Ci->green=Ai->green-Bi->green;
          Cr->blue=Ar->blue-Br->blue;
          Ci->blue=Ai->blue-Bi->blue;
          if (images->matte != MagickFalse)
            {
              Cr->opacity=Ar->opacity-Br->opacity;
              Ci->opacity=Ai->opacity-Bi->opacity;
            }
          break;
        }
      }
      Ar++;
      Ai++;
      Br++;
      Bi++;
      Cr++;
      Ci++;
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ComplexImages)
#endif
        proceed=SetImageProgress(images,ComplexImageTag,progress++,
          images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwardFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  RollFourier() circularly shifts ("rolls") the width x height array
  roll_pixels by (x_offset,y_offset), wrapping indices that fall outside the
  array back around.  It is used to move the zero-frequency (DC) term between
  the array origin and the array center.  Returns MagickFalse only when the
  scratch buffer cannot be allocated; on success roll_pixels is updated in
  place.
*/
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;

  MemoryInfo
    *source_info;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    /*
      v is the destination row: y+y_offset wrapped into [0,height).
    */
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      /*
        u is the destination column: x+x_offset wrapped into [0,width).
      */
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  /*
    Copy the rolled scratch buffer back over the caller's array.
  */
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  ForwardQuadrantSwap() redistributes the non-redundant half-plane transform
  stored in source_pixels (center = width/2+1 values per row) into the full
  width x height array forward_pixels, filling the mirrored half from the
  stored half (real-input transforms are conjugate-symmetric) so the DC term
  ends up at the image center.  Returns MagickFalse only if the intermediate
  roll fails to allocate its scratch buffer.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Copy the stored half-plane into the right half of the output.
  */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /*
    Mirror it into the left half: rows y >= 1 first, then row 0 separately.
  */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}
/*
  CorrectPhaseLHS() negates every value in the left half (columns
  0 .. width/2-1) of the width x height array fourier_pixels, in place.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  size_t
    half_width;

  ssize_t
    column,
    row;

  half_width=width/2;
  for (row=0; row < (ssize_t) height; row++)
    for (column=0; column < (ssize_t) half_width; column++)
      fourier_pixels[row*(ssize_t) width+column]=
        -fourier_pixels[row*(ssize_t) width+column];
}
/*
  ForwardFourier() takes the raw half-plane magnitude and phase (or real and
  imaginary) arrays produced by ForwardFourierTransform(), quadrant-swaps
  them into full-size centered arrays, and writes them into the selected
  channel of the first (magnitude) and second (phase) images of the supplied
  image list.  When fourier_info->modulus is set, phase is rescaled from
  [-pi,pi] to [0,1] for storage as intensity.  Returns the status of the last
  pixel-cache sync (MagickFalse on failure).
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  /*
    The result is an image pair: magnitude first, phase second.
  */
  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  /*
    Recenter the half-plane FFT output so the DC term sits at the center.
  */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /*
        Map phase from [-pi,pi] to [0,1] so it can be stored as intensity.
      */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  /*
    Write the magnitude (or real) plane into the selected channel.
  */
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /*
    Write the phase (or imaginary) plane into the selected channel.
  */
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
/*
  ForwardFourierTransform() reads one channel of image into a real array,
  runs the FFTW real-to-complex 2-D transform, optionally normalizes by
  1/(width*height) (the default, or when the "fourier:normalize" artifact is
  "forward"), and emits either magnitude/phase or real/imaginary arrays of
  height*center elements each, depending on fourier_info->modulus.
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /*
    Zero-fill so any padding beyond the image extent transforms as zero.
  */
  memset(source_pixels,0,fourier_info->width*fourier_info->height*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /*
        Scale the selected channel into [0,1].
      */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    The r2c output is the non-redundant complex half plane:
    width*(height/2+1) values.
  */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /*
    FFTW planner calls are not thread-safe; only plan creation is serialized.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize Fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of one channel of
  image and writes the result into the magnitude/phase image pair in
  fourier_image.  Transform dimensions are padded to an even square when the
  image is not already one.  Returns MagickFalse on allocation failure or
  when either transform stage fails.

  Fix: the error path previously tested `magnitude_info ==
  (MemoryInfo *) NULL` before relinquishing it, which relinquished a NULL
  pointer and leaked the live allocation when only phase_info failed to
  acquire.  The test is now `!=`, matching every other error path in this
  file (e.g. ForwardFourier, InverseFourier).
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  /*
    Pad to an even square so the transform dimensions are well defined.
  */
  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      size_t extent=image->columns < image->rows ? image->rows :
        image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /*
    Half-plane buffers: width*(height/2+1) doubles each.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list holding the
  forward DFT of image: magnitude/phase when modulus is true, otherwise
  real/imaginary.  Without FFTW support it throws a missing-delegate warning
  and returns an empty list.  Channels are transformed in parallel OpenMP
  sections; gray images transform a single channel.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      height,
      width;

    /*
      Pad to an even square, mirroring ForwardFourierTransformChannel().
    */
    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        size_t extent=image->columns < image->rows ? image->rows :
          image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsGrayImage(image,exception);
            /*
              Transform each channel concurrently; any failing section
              clears the shared status.
            */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                /*
                  Gray images need only one channel transformed.
                */
                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayChannels,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,RedChannel,
                    modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlueChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->matte != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    OpacityChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    IndexChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return the transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() is the inverse of ForwardQuadrantSwap(): it collapses
  the full width x height centered array source back into the non-redundant
  half plane (center = width/2+1 columns per row) in destination, then rolls
  the DC term back to the origin.  Returns the status of the final roll.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  /*
    Row 0 and the Nyquist column are handled separately.
  */
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the magnitude/phase (or real/imaginary) image pair
  back into double arrays, undoes the forward quadrant swap and the phase
  [0,1] scaling (when fourier_info->modulus is set), and recombines the two
  planes into the complex half-plane array fourier_pixels expected by the
  FFTW complex-to-real transform.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse Fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
    (fourier_info->height/2+1)*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /*
      NOTE(review): authentic index queue fetched from a *virtual* cache
      view -- presumably valid for read-only access here; confirm against
      the pixel-cache API.
    */
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /*
        Scale the selected channel into [0,1].
      */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /*
    Undo the forward quadrant swap on the magnitude (or real) plane.
  */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /*
        Map phase back from [0,1] to [-pi,pi] (inverse of ForwardFourier).
      */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}
/*
  InverseFourierTransform() runs the FFTW complex-to-real 2-D transform on
  fourier_pixels, optionally pre-normalizing by 1/(width*height) when the
  "fourier:normalize" artifact is "inverse", and writes the real result back
  into the selected channel of image, clipped to the image extent.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /*
    NOTE(review): value may be NULL when the artifact is unset -- presumably
    LocaleCompare() tolerates a NULL argument (as elsewhere in MagickCore);
    confirm.
  */
  value=GetImageArtifact(image,"fourier:normalize");
  if (LocaleCompare(value,"inverse") == 0)
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    FFTW planner calls are not thread-safe; serialize plan creation only.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
    fourier_pixels,source_pixels,FFTW_ESTIMATE);
  fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
  fftw_destroy_plan(fftw_c2r_plan);
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /*
      The transform may be larger than the image (padding); clip rows and
      columns to the image extent.
    */
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /*
        NOTE(review): q advances every iteration even past the clipped row
        width when width > columns; writes are guarded but the pointer
        arithmetic runs beyond the synced region -- confirm intent.
      */
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedChannel:
          default:
          {
            SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case GreenChannel:
          {
            SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case BlueChannel:
          {
            SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case OpacityChannel:
          {
            SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case IndexChannel:
          {
            SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
              source_pixels[i]));
            break;
          }
          case GrayChannels:
          {
            SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
        }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one channel of fourier_image
  from the magnitude/phase (or real/imaginary) image pair: it merges the two
  planes into a complex half-plane buffer, then applies the inverse FFTW
  transform.  Transform dimensions are padded to an even square when the
  magnitude image is not already one.  Returns MagickFalse on allocation or
  transform failure.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *complex_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *complex_info;

  size_t
    extent;

  /*
    Transform dimensions: an even square, padded when required.
  */
  extent=magnitude_image->columns;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      if (magnitude_image->columns < magnitude_image->rows)
        extent=magnitude_image->rows;
      if ((extent & 0x01) == 1)
        extent++;
    }
  fourier_info.width=extent;
  fourier_info.height=extent;
  fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  /*
    Complex half-plane buffer: width*(height/2+1) entries.
  */
  complex_info=AcquireVirtualMemory((size_t) fourier_info.width,
    (fourier_info.height/2+1)*sizeof(*complex_pixels));
  if (complex_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  complex_pixels=(fftw_complex *) GetVirtualMemoryBlob(complex_info);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    complex_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,complex_pixels,fourier_image,
      exception);
  complex_info=RelinquishVirtualMemory(complex_info);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() returns the inverse DFT of the
  magnitude/phase (or real/imaginary, per modulus) image pair.  Without FFTW
  support it throws a missing-delegate warning and returns NULL.  Channels
  are reconstructed in parallel OpenMP sections; gray pairs reconstruct a
  single channel.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
  const Image *phase_image,const MagickBooleanType modulus,
  ExceptionInfo *exception)
{
  Image
    *fourier_image;

  assert(magnitude_image != (Image *) NULL);
  assert(magnitude_image->signature == MagickCoreSignature);
  if (magnitude_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      magnitude_image->filename);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",magnitude_image->filename);
      return((Image *) NULL);
    }
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  fourier_image=(Image *) NULL;
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    magnitude_image->filename);
#else
  {
    fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
      magnitude_image->rows,MagickTrue,exception);
    if (fourier_image != (Image *) NULL)
      {
        MagickBooleanType
          is_gray,
          status;

        status=MagickTrue;
        /*
          Treat the pair as gray only when both constituents are gray.
        */
        is_gray=IsGrayImage(magnitude_image,exception);
        if (is_gray != MagickFalse)
          is_gray=IsGrayImage(phase_image,exception);
        /*
          Reconstruct each channel concurrently; any failing section clears
          the shared status.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayChannels,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlueChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->matte != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,OpacityChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,IndexChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
|
line_search_contact_strategy.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY)
#define KRATOS_LINE_SEARCH_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
#include "solving_strategies/strategies/implicit_solving_strategy.h"
#include "solving_strategies/strategies/line_search_strategy.h"
#include "utilities/openmp_utils.h"
#include "utilities/variable_utils.h"
#include "utilities/atomic_utilities.h"
// Convergence criterias
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// Default builder and solver
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
// TODO: Extend the descriptions
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
 * @class LineSearchContactStrategy
 * @brief Line-search solving strategy specialized for contact problems.
 * @details The solution increment Dx is split into a displacement part and a
 * Lagrange-multiplier (LM) part. For each part a parabola is fitted to the
 * residual norm evaluated at zero, half and full step, and an independent
 * line-search factor is computed for the displacement and the LM updates.
 * @tparam TSparseSpace The sparse space considered
 * @tparam TDenseSpace The dense space considered
 * @tparam TLinearSolver The linear solver considered
 * @author Vicente Mataix Ferrandiz
 */
template<class TSparseSpace,
         class TDenseSpace, // = DenseSpace<double>,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class LineSearchContactStrategy :
    public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    ///@name Type Definitions
    ///@{

    /// The convergence criteria type
    typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;

    /** Counted pointer of ClassName */
    KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy );

    /// The solving strategy (most abstract) base type
    typedef SolvingStrategy<TSparseSpace, TDenseSpace> SolvingStrategyType;

    /// The implicit solving strategy base type
    typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;

    /// The Newton-Raphson base type (used to reuse its UpdateDatabase)
    typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType;

    /// The direct base class (standard line-search strategy)
    typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    /// The type of this class (used by Create)
    typedef LineSearchContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;

    typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;

    typedef typename BaseType::TDataType TDataType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef ModelPart::NodesContainerType NodesArrayType;

    typedef ModelPart::ConditionsContainerType ConditionsArrayType;

    /// Index type definition
    typedef std::size_t IndexType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     */
    explicit LineSearchContactStrategy()
    {
    }

    /**
     * @brief Default constructor. (with parameters)
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    explicit LineSearchContactStrategy(ModelPart& rModelPart, Parameters ThisParameters)
        : BaseType(rModelPart, BaseType::GetDefaultParameters())
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);
    }

    /**
     * @brief Constructor with scheme, solver and convergence criteria
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of non-linear iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters Extra configuration parameters (validated against the defaults)
     */
    LineSearchContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
    )
        : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
    {
        KRATOS_TRY;

        Parameters default_parameters = this->GetDefaultParameters();
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Constructor with an explicit builder and solver
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param MaxIterations The maximum number of non-linear iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters Extra configuration parameters (validated against the defaults)
     */
    LineSearchContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
    )
        : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag )
    {
        KRATOS_TRY;

        Parameters default_parameters = this->GetDefaultParameters();
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * Destructor.
     */
    ~LineSearchContactStrategy() override
    = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Create method
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    typename SolvingStrategyType::Pointer Create(
        ModelPart& rModelPart,
        Parameters ThisParameters
        ) const override
    {
        return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
    }

    /**
     * @brief This method returns the default parameters in order to avoid code duplication
     * @return Returns the default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name" : "line_search_contact_strategy"
        })" );

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /**
     * @brief Returns the name of the class as used in the settings (snake_case format)
     * @return The name of the class
     */
    static std::string Name()
    {
        return "line_search_contact_strategy";
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "LineSearchContactStrategy";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    // To check if we recalculate or not the scale factor
    // (in-class initializer: no constructor sets this member explicitly)
    bool mRecalculateFactor = false;

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * before solving the solution step.
     * @details A member variable should be used as a flag to make sure this function is called only once per step.
     */
    void InitializeSolutionStep() override
    {
        BaseType::InitializeSolutionStep();

        // TODO: Add something if necessary
    }

    /**
     * @brief Here the database is updated.
     * @details Evaluates the mixed residual (displacement + LM) at zero, half and
     * full step, fits a parabola per field, and applies a separate line-search
     * factor to the displacement increment and to the LM increment.
     * @param A The LHS matrix of the system of equations
     * @param Dx The incremental update of primary variables
     * @param b The RHS vector of the system of equations
     * @param MoveMesh The flag that allows to move the mesh
     */
    void UpdateDatabase(
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b,
        const bool MoveMesh
        ) override
    {
        typename TSchemeType::Pointer pScheme = this->GetScheme();
        typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate in the parts of LM and displacement

        TSystemVectorType aux(b.size()); //TODO: do it by using the space
        TSparseSpace::Assign(aux, 0.5, Dx);

        TSystemVectorType DxDisp(b.size());
        TSystemVectorType DxLM(b.size());
        ComputeSplitDx(Dx, DxDisp, DxLM);

        // Compute residual without update
        TSparseSpace::SetToZero(b);
        pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
        double roDisp;
        double roLM;
        ComputeMixedResidual(b, roDisp, roLM);

        // Compute half step residual
        NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
        TSparseSpace::SetToZero(b);
        pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
        double rhDisp;
        double rhLM;
        ComputeMixedResidual(b, rhDisp, rhLM);

        // Compute full step residual (add another half Dx to the previous half)
        NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
        TSparseSpace::SetToZero(b);
        pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
        double rfDisp;
        double rfLM;
        ComputeMixedResidual(b, rfDisp, rfLM);

        // We compute the parabola
        double XminDisp = 1e-3;
        double XmaxDisp = 1.0;
        double XminLM = 1e-3;
        double XmaxLM = 1.0;
        ComputeParabola(XminDisp, XmaxDisp, rfDisp, roDisp, rhDisp);
        ComputeParabola(XminLM, XmaxLM, rfLM, roLM, rhLM);

        // Perform final update: we are currently at the full step, so move back
        // by (1 - Xmax) along each split increment
        TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp);
        TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM);
        NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
    }

    /**
     * @brief This method splits the vector of increment of DoF in displacement and LM
     * @param Dx The increment of displacements and LM
     * @param DxDisp The increment of displacements (output; must be sized like Dx)
     * @param DxLM The increment of LM (output; must be sized like Dx)
     */
    void ComputeSplitDx(
        TSystemVectorType& Dx,
        TSystemVectorType& DxDisp,
        TSystemVectorType& DxLM
        )
    {
        // Now we iterate over all the nodes
        NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
        const int num_nodes = static_cast<int>(nodes_array.size());

        #pragma omp parallel for
        for(int i = 0; i < num_nodes; ++i)
        {
            auto it_node = nodes_array.begin() + i;

            for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++)
            {
                const int j = (**itDoF).EquationId();
                const std::size_t CurrVar = (**itDoF).GetVariable().Key();

                if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z))
                {
                    DxDisp[j] = Dx[j];
                    DxLM[j] = 0.0;
                }
                else // Corresponding with contact
                {
                    DxDisp[j] = 0.0;
                    DxLM[j] = Dx[j];
                }
            }
        }
    }

    /**
     * @brief This method calculates the norm considering one norm for the displacement and other norm for the LM
     * @param b The residual vector
     * @param normDisp The norm of the displacement residual (output, overwritten)
     * @param normLM The norm of the LM residual (output, overwritten)
     */
    void ComputeMixedResidual(
        TSystemVectorType& b,
        double& normDisp,
        double& normLM
        )
    {
        // Reset the accumulators: callers pass uninitialized doubles, so
        // accumulating without zeroing first would read indeterminate values
        normDisp = 0.0;
        normLM = 0.0;

        // Now we iterate over all the nodes
        NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
        const int num_nodes = static_cast<int>(nodes_array.size());

        #pragma omp parallel for
        for(int i = 0; i < num_nodes; ++i) {
            auto it_node = nodes_array.begin() + i;

            for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) {
                const int j = (**itDoF).EquationId();
                const std::size_t CurrVar = (**itDoF).GetVariable().Key();

                if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) {
                    AtomicAdd(normDisp, b[j] * b[j]);
                } else { // Corresponding with contact
                    AtomicAdd(normLM, b[j] * b[j]);
                }
            }
        }

        normDisp = std::sqrt(normDisp);
        normLM = std::sqrt(normLM);
    }

    /**
     * @brief This method computes the parabola necessary for the line search
     * @param Xmax The maximal abscissa (in/out: overwritten with the computed line-search factor)
     * @param Xmin The minimal abscissa (used as fallback factor when the full step does not improve)
     * @param rf The residual norm of the full step
     * @param ro The residual norm without step
     * @param rh The residual norm of the half step
     */
    void ComputeParabola(
        double& Xmax,
        double& Xmin,
        const double rf,
        const double ro,
        const double rh
        )
    {
        // Compute optimal (limited to the range 0-1)
        // Parabola is y = a*x^2 + b*x + c -> min/max for
        // x=0   --> r=ro
        // x=1/2 --> r=rh
        // x=1   --> r=rf
        // c = ro, b = 4*rh - rf - 3*ro, a = 2*rf - 4*rh + 2*ro
        // max found if a>0 at the position Xmax = (rf/4 - rh)/(rf - 2*rh);
        const double parabole_a = 2 * rf + 2 * ro - 4 * rh;
        const double parabole_b = 4 * rh - rf - 3 * ro;

        if( parabole_a > 0.0) // If parabola has a local minima
        {
            Xmax = -0.5 * parabole_b/parabole_a; // -b / 2a
            if( Xmax > 1.0)
                Xmax = 1.0;
            else if(Xmax < -1.0)
                Xmax = -1.0;
        }
        else // Parabola degenerates to either a line or to have a local max. best solution on either extreme
        {
            if(rf < ro)
                Xmax = 1.0;
            else
                Xmax = Xmin; // Should be zero, but otherwise it will stagnate
        }
    }

    /**
     * @brief This method assigns settings to member variables
     * @param ThisParameters Parameters that are assigned to the member variables
     */
    void AssignSettings(const Parameters ThisParameters) override
    {
        BaseType::AssignSettings(ThisParameters);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    /**
     * Copy constructor.
     * NOTE(review): intentionally empty body — neither the base class nor the
     * members are copied; confirm this is the intended (effectively non-copying)
     * behavior before relying on copies of this strategy.
     */
    LineSearchContactStrategy(const LineSearchContactStrategy& Other)
    {
    };

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class LineSearchContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.