/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/option-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
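% For example, a minimal usage sketch (image and clone_image are assumed
% to be valid Image pointers; error handling is elided):
%
%   if (CloneImageProfiles(image,clone_image) == MagickFalse)
%     (void) fprintf(stderr,"unable to clone image profiles\n");
%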
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickSignature);
image->color_profile.length=clone_image->color_profile.length;
image->color_profile.info=clone_image->color_profile.info;
image->iptc_profile.length=clone_image->iptc_profile.length;
image->iptc_profile.info=clone_image->iptc_profile.info;
if (clone_image->profiles != (void *) NULL)
{
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
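% For example, a minimal sketch that strips any IPTC profile (image is
% assumed to be a valid Image pointer):
%
%   if (DeleteImageProfile(image,"iptc") == MagickFalse)
%     (void) fprintf(stderr,"no IPTC profile to delete\n");
%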
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
if (LocaleCompare(name,"icc") == 0)
{
/*
Continue to support deprecated color profile for now.
*/
image->color_profile.length=0;
image->color_profile.info=(unsigned char *) NULL;
}
if (LocaleCompare(name,"iptc") == 0)
{
/*
Continue to support deprecated IPTC profile for now.
*/
image->iptc_profile.length=0;
image->iptc_profile.info=(unsigned char *) NULL;
}
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
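% For example (normally called for you when an image is destroyed; shown
% here only as a sketch):
%
%   DestroyImageProfiles(image);
%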
*/
MagickExport void DestroyImageProfiles(Image *image)
{
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
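% For example, a minimal sketch that reports the size of the ICC profile,
% if one is present (image is assumed to be a valid Image pointer; the
% returned StringInfo is owned by the image, so do not destroy it):
%
%   const StringInfo
%     *icc_profile;
%
%   icc_profile=GetImageProfile(image,"icc");
%   if (icc_profile != (const StringInfo *) NULL)
%     (void) fprintf(stdout,"icc profile: %.20g bytes\n",
%       (double) GetStringInfoLength(icc_profile));
%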
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
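% For example, a minimal sketch that lists the name of every profile
% attached to an image (the returned name is owned by the profile map, so
% do not free it):
%
%   char
%     *name;
%
%   ResetImageProfileIterator(image);
%   for (name=GetNextImageProfile(image); name != (char *) NULL; )
%   {
%     (void) fprintf(stdout,"%s\n",name);
%     name=GetNextImageProfile(image);
%   }
%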
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image; otherwise it is added or applied. Use a name of '*' and a
% profile of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profiles is applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
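% For example, a minimal sketch that applies an ICC profile held in a
% memory buffer, then one that strips all profiles (datum and length are
% assumed to describe a valid profile blob):
%
%   if (ProfileImage(image,"icc",datum,length,MagickFalse) == MagickFalse)
%     (void) fprintf(stderr,"unable to profile image\n");
%
%   (void) ProfileImage(image,"*",(const void *) NULL,0,MagickFalse);
%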
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
register ssize_t
i;
assert(pixels != (unsigned short **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (unsigned short *) NULL)
pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
pixels=(unsigned short **) RelinquishMagickMemory(pixels);
return(pixels);
}
static unsigned short **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
register ssize_t
i;
unsigned short
**pixels;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (unsigned short **) NULL)
return((unsigned short **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (unsigned short *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
register ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
const int intent,const cmsUInt32Number flags)
{
cmsHTRANSFORM
*transform;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
(void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
Image
*image;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
image=(Image *) context;
if (image != (Image *) NULL)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,
const MagickBooleanType magick_unused(clone))
{
#define ProfileImageTag "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
if (source_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_profile); \
if (target_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
magick_unreferenced(clone);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add an ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace");
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image);
value=GetImageProperty(image,"exif:InteroperabilityIndex");
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image);
/* Future.
value=GetImageProperty(image,"exif:InteroperabilityIndex");
if (LocaleCompare(value,"R03.") != 0)
(void) SetAdobeRGB1998ImageProfile(image);
*/
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
image->filename);
#else
{
cmsHPROFILE
source_profile;
/*
Transform pixel colors as defined by the color profiles.
*/
cmsSetLogErrorHandler(LCMSExceptionHandler);
source_profile=cmsOpenProfileFromMemTHR((cmsContext) image,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile);
else
{
CacheView
*image_view;
ColorspaceType
source_colorspace,
target_colorspace;
cmsColorSpaceSignature
signature;
cmsHPROFILE
target_profile;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags,
source_type,
target_type;
ExceptionInfo
*exception;
int
intent;
MagickOffsetType
progress;
size_t
source_channels,
target_channels;
ssize_t
y;
unsigned short
**magick_restrict source_pixels,
**magick_restrict target_pixels;
exception=(&image->exception);
target_profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_profile=source_profile;
source_profile=cmsOpenProfileFromMemTHR((cmsContext) image,
GetStringInfoDatum(icc_profile),(cmsUInt32Number)
GetStringInfoLength(icc_profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
switch (cmsGetColorSpace(source_profile))
{
case cmsSigCmykData:
{
source_colorspace=CMYKColorspace;
source_type=(cmsUInt32Number) TYPE_CMYK_16;
source_channels=4;
break;
}
case cmsSigGrayData:
{
source_colorspace=GRAYColorspace;
source_type=(cmsUInt32Number) TYPE_GRAY_16;
source_channels=1;
break;
}
case cmsSigLabData:
{
source_colorspace=LabColorspace;
source_type=(cmsUInt32Number) TYPE_Lab_16;
source_channels=3;
break;
}
case cmsSigLuvData:
{
source_colorspace=YUVColorspace;
source_type=(cmsUInt32Number) TYPE_YUV_16;
source_channels=3;
break;
}
case cmsSigRgbData:
{
source_colorspace=sRGBColorspace;
source_type=(cmsUInt32Number) TYPE_RGB_16;
source_channels=3;
break;
}
case cmsSigXYZData:
{
source_colorspace=XYZColorspace;
source_type=(cmsUInt32Number) TYPE_XYZ_16;
source_channels=3;
break;
}
case cmsSigYCbCrData:
{
source_colorspace=YCbCrColorspace;
source_type=(cmsUInt32Number) TYPE_YCbCr_16;
source_channels=3;
break;
}
default:
{
source_colorspace=UndefinedColorspace;
source_type=(cmsUInt32Number) TYPE_RGB_16;
source_channels=3;
break;
}
}
signature=cmsGetPCS(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_profile);
switch (signature)
{
case cmsSigCmykData:
{
target_colorspace=CMYKColorspace;
target_type=(cmsUInt32Number) TYPE_CMYK_16;
target_channels=4;
break;
}
case cmsSigLabData:
{
target_colorspace=LabColorspace;
target_type=(cmsUInt32Number) TYPE_Lab_16;
target_channels=3;
break;
}
case cmsSigGrayData:
{
target_colorspace=GRAYColorspace;
target_type=(cmsUInt32Number) TYPE_GRAY_16;
target_channels=1;
break;
}
case cmsSigLuvData:
{
target_colorspace=YUVColorspace;
target_type=(cmsUInt32Number) TYPE_YUV_16;
target_channels=3;
break;
}
case cmsSigRgbData:
{
target_colorspace=sRGBColorspace;
target_type=(cmsUInt32Number) TYPE_RGB_16;
target_channels=3;
break;
}
case cmsSigXYZData:
{
target_colorspace=XYZColorspace;
target_type=(cmsUInt32Number) TYPE_XYZ_16;
target_channels=3;
break;
}
case cmsSigYCbCrData:
{
target_colorspace=YCbCrColorspace;
target_type=(cmsUInt32Number) TYPE_YCbCr_16;
target_channels=3;
break;
}
default:
{
target_colorspace=UndefinedColorspace;
target_type=(cmsUInt32Number) TYPE_RGB_16;
target_channels=3;
break;
}
}
if ((source_colorspace == UndefinedColorspace) ||
(target_colorspace == UndefinedColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == GRAYColorspace) &&
(SetImageGray(image,exception) == MagickFalse))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == CMYKColorspace) &&
(image->colorspace != CMYKColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == XYZColorspace) &&
(image->colorspace != XYZColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace == YCbCrColorspace) &&
(image->colorspace != YCbCrColorspace))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
if ((source_colorspace != CMYKColorspace) &&
(source_colorspace != GRAYColorspace) &&
(source_colorspace != LabColorspace) &&
(source_colorspace != XYZColorspace) &&
(source_colorspace != YCbCrColorspace) &&
(IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
name);
switch (image->rendering_intent)
{
case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
case SaturationIntent: intent=INTENT_SATURATION; break;
default: intent=INTENT_PERCEPTUAL; break;
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
if ((source_pixels == (unsigned short **) NULL) ||
(target_pixels == (unsigned short **) NULL))
{
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if (source_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
return(MagickFalse);
}
if (target_colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_colorspace);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
register unsigned short
*p;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p=source_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=ScaleQuantumToShort(GetPixelRed(q));
if (source_channels > 1)
{
*p++=ScaleQuantumToShort(GetPixelGreen(q));
*p++=ScaleQuantumToShort(GetPixelBlue(q));
}
if (source_channels > 3)
*p++=ScaleQuantumToShort(GetPixelIndex(indexes+x));
q++;
}
cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
(unsigned int) image->columns);
p=target_pixels[id];
q-=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleShortToQuantum(*p));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
p++;
if (target_channels > 1)
{
SetPixelGreen(q,ScaleShortToQuantum(*p));
p++;
SetPixelBlue(q,ScaleShortToQuantum(*p));
p++;
}
if (target_channels > 3)
{
SetPixelIndex(indexes+x,ScaleShortToQuantum(*p));
p++;
}
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ProfileImage)
#endif
proceed=SetImageProgress(image,ProfileImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_colorspace);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->matte == MagickFalse ? TrueColorType :
TrueColorMatteType;
break;
}
case cmsSigCmykData:
{
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
break;
}
case cmsSigGrayData:
{
image->type=image->matte == MagickFalse ? GrayscaleType :
GrayscaleMatteType;
break;
}
default:
break;
}
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) ||
(cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
}
(void) cmsCloseProfile(source_profile);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% StringInfo *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
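% For example, a minimal sketch that detaches the 8BIM profile and takes
% ownership of it (the caller is responsible for destroying the returned
% StringInfo):
%
%   StringInfo
%     *profile;
%
%   profile=RemoveImageProfile(image,"8bim");
%   if (profile != (StringInfo *) NULL)
%     profile=DestroyStringInfo(profile);
%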
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
if (LocaleCompare(name,"icc") == 0)
{
/*
Continue to support deprecated color profile for now.
*/
image->color_profile.length=0;
image->color_profile.info=(unsigned char *) NULL;
}
if (LocaleCompare(name,"iptc") == 0)
{
/*
Continue to support deprecated IPTC profile for now.
*/
image->iptc_profile.length=0;
image->iptc_profile.info=(unsigned char *) NULL;
}
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% void ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
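% For example, a minimal sketch that attaches an EXIF profile blob without
% applying any color transform (datum and length are assumed to describe a
% valid profile; SetImageProfile() clones the StringInfo, so destroy your
% copy afterwards):
%
%   StringInfo
%     *profile;
%
%   profile=AcquireStringInfo(length);
%   SetStringInfoDatum(profile,datum);
%   (void) SetImageProfile(image,"exif",profile);
%   profile=DestroyStringInfo(profile);
%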
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=(*p++);
return(p);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
*quantum=(size_t) (*p++ << 24);
*quantum|=(size_t) (*p++ << 16);
*quantum|=(size_t) (*p++ << 8);
*quantum|=(size_t) (*p++ << 0);
return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
*quantum=(unsigned short) (*p++ << 8);
*quantum|=(unsigned short) (*p++ << 0);
return(p);
}
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
unsigned char
buffer[4];
buffer[0]=(unsigned char) (quantum >> 24);
buffer[1]=(unsigned char) (quantum >> 16);
buffer[2]=(unsigned char) (quantum >> 8);
buffer[3]=(unsigned char) quantum;
(void) CopyMagickMemory(p,buffer,4);
}
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
register const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) ||
(count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_extent;
StringInfo
*extract_profile;
extract_extent=0;
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) CopyMagickMemory(extract_profile->datum,datum,offset);
}
else
{
offset=(p-datum);
extract_extent=profile->length;
if ((extract_extent & 0x01) != 0)
extract_extent++;
extract_profile=AcquireStringInfo(offset+extract_extent+extent);
(void) CopyMagickMemory(extract_profile->datum,datum,offset-4);
(void) WriteResourceLong(extract_profile->datum+offset-4,
(unsigned int) profile->length);
(void) CopyMagickMemory(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) CopyMagickMemory(extract_profile->datum+offset+extract_extent,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block)
{
const unsigned char
*datum;
register const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
(count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
p=ReadResourceLong(p,&resolution);
image->x_resolution=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->y_resolution=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive)
{
char
key[MaxTextExtent],
property[MaxTextExtent];
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MaxTextExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),CloneStringInfo(profile));
if ((status != MagickFalse) &&
((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
{
const StringInfo
*icc_profile;
/*
Continue to support deprecated color profile member.
*/
icc_profile=GetImageProfile(image,name);
if (icc_profile != (const StringInfo *) NULL)
{
image->color_profile.length=GetStringInfoLength(icc_profile);
image->color_profile.info=GetStringInfoDatum(icc_profile);
}
}
if ((status != MagickFalse) &&
((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
{
const StringInfo
*iptc_profile;
/*
Continue to support deprecated IPTC profile member.
*/
iptc_profile=GetImageProfile(image,name);
if (iptc_profile != (const StringInfo *) NULL)
{
image->iptc_profile.length=GetStringInfoLength(iptc_profile);
image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
}
}
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,profile);
else if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,profile);
}
/*
Inject profile into image properties.
*/
(void) FormatLocaleString(property,MaxTextExtent,"%s:*",name);
(void) GetImageProperty(image,property);
return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile)
{
return(SetImageProfileInternal(image,name,profile,MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
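% For example, a minimal sketch, typically called by a coder just before
% the image is written (image is assumed to be a valid Image pointer):
%
%   if (SyncImageProfiles(image) == MagickFalse)
%     (void) fprintf(stderr,"unable to sync image profiles\n");
%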
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
c;
if (*length < 1)
return(EOF);
c=(int) (*(*p)++);
(*length)--;
return(c);
}
static inline unsigned short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) ((buffer[1] << 8) | buffer[0]);
return((unsigned short) (value & 0xffff));
}
value=(unsigned short) ((buffer[0] << 8) | buffer[1]);
return((unsigned short) (value & 0xffff));
}
static inline unsigned int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
unsigned int
value;
if (endian == LSBEndian)
{
value=(unsigned int) ((buffer[3] << 24) | (buffer[2] << 16) |
(buffer[1] << 8 ) | (buffer[0]));
return((unsigned int) (value & 0xffffffff));
}
value=(unsigned int) ((buffer[0] << 24) | (buffer[1] << 16) |
(buffer[2] << 8) | buffer[3]);
return((unsigned int) (value & 0xffffffff));
}
static inline unsigned int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
unsigned int
value;
if (*length < 4)
return(0);
value=ReadProfileLong(MSBEndian,*p);
(*length)-=4;
*p+=4;
return(value);
}
static inline unsigned short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
unsigned short
value;
if (*length < 2)
return(0);
value=ReadProfileShort(MSBEndian,*p);
(*length)-=2;
*p+=2;
return(value);
}
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
unsigned char
buffer[4];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
buffer[2]=(unsigned char) (value >> 16);
buffer[3]=(unsigned char) (value >> 24);
(void) CopyMagickMemory(p,buffer,4);
return;
}
buffer[0]=(unsigned char) (value >> 24);
buffer[1]=(unsigned char) (value >> 16);
buffer[2]=(unsigned char) (value >> 8);
buffer[3]=(unsigned char) value;
(void) CopyMagickMemory(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
unsigned char
buffer[2];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
(void) CopyMagickMemory(p,buffer,2);
return;
}
buffer[0]=(unsigned char) (value >> 8);
buffer[1]=(unsigned char) value;
(void) CopyMagickMemory(p,buffer,2);
}
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
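  /*
    Scan for 8BIM resource blocks: each block starts with the 4-byte
    signature "8BIM" (0x38 0x42 0x49 0x4D), followed by a 16-bit resource id
    and a Pascal-style name padded to an even length.
  */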
while (length != 0)
{
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
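    /*
      Resource 0x3ED is the 8BIM resolution info block (16 bytes): the x and
      y resolutions are stored as 16.16 fixed-point values (hence the 65536.0
      scaling, e.g. 300 DPI is stored as 300*65536), each followed by the
      resolution unit.
    */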
if ((id == 0x3ED) && (count == 16))
{
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian, (unsigned int) (image->x_resolution*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian, (unsigned int) (image->x_resolution*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian, (unsigned int) (image->y_resolution*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian, (unsigned int) (image->y_resolution*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
    This is the offset to the first IFD.
*/
offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4));
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
components,
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format-1) >= EXIF_NUM_FORMATS)
break;
components=(ssize_t) ((int) ReadProfileLong(endian,q+4));
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ((int) ReadProfileLong(endian,q+8));
if ((ssize_t) (offset+number_bytes) < offset)
continue; /* prevent overflow */
if ((size_t) (offset+number_bytes) > length)
continue;
p=(unsigned char *) (exif+offset);
}
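      /*
        EXIF tags updated below: 0x011a XResolution, 0x011b YResolution,
        0x0112 Orientation, 0x0128 ResolutionUnit. The resolutions are
        RATIONALs, so a denominator of 1 is written alongside each value.
      */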
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ((int) ReadProfileLong(endian,p));
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12*
number_entries)));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
|
part_func_co.c | /*
 * partition function for RNA secondary structures
*
* Ivo L Hofacker
* Stephan Bernhart
* Ronny Lorenz
* ViennaRNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/structures.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/plotting/probabilities.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/part_func_co.h"
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
int mirnatog = 0;
double F_monomer[2] = {
0, 0
}; /* free energies of the two monomers */
#endif
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PRIVATE vrna_dimer_pf_t
wrap_co_pf_fold(char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained);
#endif
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
PUBLIC vrna_dimer_pf_t
vrna_pf_co_fold(const char *seq,
char *structure,
vrna_ep_t **pl)
{
double mfe;
vrna_dimer_pf_t X;
vrna_fold_compound_t *vc;
vrna_md_t md;
vrna_md_set_default(&md);
/* no need to backtrack MFE structure */
md.backtrack = 0;
if (pl)
md.compute_bpp = 1;
else
md.compute_bpp = 0;
vc = vrna_fold_compound(seq, &md, VRNA_OPTION_DEFAULT);
mfe = (double)vrna_mfe_dimer(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
X = vrna_pf_dimer(vc, structure);
if (pl)
*pl = vrna_plist_from_probs(vc, /*cut_off:*/ 1e-6);
vrna_fold_compound_free(vc);
return X;
}
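/*
 * Minimal usage sketch (illustrative only; the example sequence and the
 * printed field are assumptions, the '&' strand separator is the convention
 * handled by the wrappers below):
 *
 *   vrna_ep_t *plist = NULL;
 *   vrna_dimer_pf_t AB = vrna_pf_co_fold("GGGAAACCC&GGGUUUCCC", NULL, &plist);
 *   printf("ensemble free energy of the dimer: %6.2f kcal/mol\n", AB.FAB);
 *   free(plist);
 */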
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
/*
*****************************************
* BEGIN backward compatibility wrappers *
*****************************************
*/
PRIVATE vrna_dimer_pf_t
wrap_co_pf_fold(char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained)
{
int length;
char *seq;
vrna_fold_compound_t *vc;
vrna_md_t md;
vc = NULL;
length = strlen(sequence);
seq = (char *)vrna_alloc(sizeof(char) * (length + 2));
if (cut_point > -1) {
int i;
for (i = 0; i < cut_point - 1; i++)
seq[i] = sequence[i];
seq[i] = '&';
for (; i < (int)length; i++)
seq[i + 1] = sequence[i];
} else {
/* this ensures the allocation of all cofold matrices via vrna_fold_compound_t */
free(seq);
seq = strdup(sequence);
}
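  /*
   * Example (illustrative): for sequence "GGGCCC" and cut_point = 4 the
   * loop above produces seq = "GGG&CCC", the '&' marking the strand break.
   */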
/*
* if present, extract model details from provided parameters variable,
* to properly initialize the fold compound. Otherwise use default
* settings taken from deprecated global variables
*/
if (parameters)
vrna_md_copy(&md, &(parameters->model_details));
else
set_model_details(&md);
/* set min_loop_size and backtracing options */
md.compute_bpp = calculate_bppm;
md.min_loop_size = 0;
vc = vrna_fold_compound(seq, &md, VRNA_OPTION_DEFAULT);
/*
* if present, attach a copy of the parameters structure instead of the
* default parameters but take care of re-setting it to (initialized)
* model details
*/
free(vc->exp_params);
if (parameters) {
vrna_md_copy(&(parameters->model_details), &(vc->params->model_details));
vc->exp_params = vrna_exp_params_copy(parameters);
} else {
vc->exp_params = vrna_exp_params(&(vc->params->model_details));
}
/* propagate global pf_scale into vc->exp_params */
vc->exp_params->pf_scale = pf_scale;
if (is_constrained && structure) {
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
if (backward_compat_compound)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
iindx = backward_compat_compound->iindx;
free(seq);
return vrna_pf_dimer(vc, structure);
}
/*
*****************************************
* END backward compatibility wrappers *
*****************************************
*/
/*
* ###########################################
* # deprecated functions below #
 * ###########################################
*/
PUBLIC vrna_dimer_pf_t
co_pf_fold(char *sequence,
char *structure)
{
return wrap_co_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained);
}
PUBLIC vrna_dimer_pf_t
co_pf_fold_par(char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained)
{
return wrap_co_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained);
}
PUBLIC vrna_ep_t *
get_plist(vrna_ep_t *pl,
int length,
double cut_off)
{
int i, j, n, count, *my_iindx;
my_iindx = backward_compat_compound->iindx;
  /* get pair probabilities out of pr array */
count = 0;
n = 2;
for (i = 1; i < length; i++) {
for (j = i + 1; j <= length; j++) {
if (pr[my_iindx[i] - j] < cut_off)
continue;
if (count == n * length - 1) {
n *= 2;
pl = (vrna_ep_t *)vrna_realloc(pl, n * length * sizeof(vrna_ep_t));
}
pl[count].i = i;
pl[count].j = j;
pl[count++].p = pr[my_iindx[i] - j];
/* printf("gpl: %2d %2d %.9f\n",i,j,pr[my_iindx[i]-j]); */
}
}
pl[count].i = 0;
  pl[count].j = 0; /* terminating list entry (i = j = 0, p = 0) */
pl[count++].p = 0.;
pl = (vrna_ep_t *)vrna_realloc(pl, (count) * sizeof(vrna_ep_t));
return pl;
}
PUBLIC void
compute_probabilities(double FAB,
double FA,
double FB,
vrna_ep_t *prAB,
vrna_ep_t *prA,
vrna_ep_t *prB,
int Alength)
{
if (backward_compat_compound && backward_compat) {
vrna_pf_dimer_probs(FAB,
FA,
FB,
prAB,
(const vrna_ep_t *)prA,
(const vrna_ep_t *)prB,
Alength,
(const vrna_exp_param_t *)backward_compat_compound->exp_params);
}
}
PUBLIC vrna_dimer_conc_t *
get_concentrations(double FcAB,
double FcAA,
double FcBB,
double FEA,
double FEB,
double *startconc)
{
return vrna_pf_dimer_concentrations(FcAB,
FcAA,
FcBB,
FEA,
FEB,
(const double *)startconc,
(const vrna_exp_param_t *)backward_compat_compound->exp_params);
}
PUBLIC void
init_co_pf_fold(int length)
{
/* DO NOTHING */
}
PUBLIC void
free_co_pf_arrays(void)
{
if (backward_compat_compound && backward_compat) {
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = NULL;
backward_compat = 0;
}
}
PUBLIC FLT_OR_DBL *
export_co_bppm(void)
{
if (backward_compat_compound)
return backward_compat_compound->exp_matrices->probs;
else
return NULL;
}
PUBLIC void
update_co_pf_params(int length)
{
if (backward_compat_compound && backward_compat) {
vrna_md_t md;
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC void
update_co_pf_params_par(int length,
vrna_exp_param_t *parameters)
{
if (backward_compat_compound && backward_compat) {
vrna_md_t md;
if (parameters) {
vrna_exp_params_subst(backward_compat_compound, parameters);
} else {
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
}
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
#endif
|
learn.c | /**************************************************************/
/*! \File learn.c
\brief This is the file containing the fundamental functions
to learn the model, used by GLSLIM and GLSLIMr0.
\author Evangelia Christakopoulou
\version 1.0
\date 2016
*/
/**************************************************************/
#include<slim.h>
/*****************************************************************************/
/*! \brief This function creates a new training matrix A'' and then
calls the main function local_learn, for computing the model.
Original training matrix A is of size n * m (rows * cols)
An intermediate training matrix A' is created which has:
nrows = rows of the original training matrix A
ncols = (number_of_clusters + 1) * cols of the original matrix A
    Every user then has exactly the nnzs of A,
    plus the same nnzs copied into the block of the cluster to which the
    user belongs. For all other clusters the entries are 0.
In order to be able to regularize properly, we add underneath A'
a diagonal matrix which is of size: ncols * ncols.
This diagonal matrix contains the regularization params across the
diagonal. For the submatrix m * m (cols * cols) it has the global
regularization (lg) across the diagonal and for the rest,
it has the local regularization (ll).
The final matrix is A''.
    Example: for 3 clusters:

               m    m    m    m
        n [          A'           ]       [   ]      [ b ]  n
        m [ lg                    ]       [   ]      [ 0 ]  m
        m [      ll               ]   *   [ w ]  =   [ 0 ]  m
        m [           ll          ]       [   ]      [ 0 ]  m
        m [                ll     ]       [   ]      [ 0 ]  m
                    A''
    w is of size 4m * 1.
\param[in] ctrl The ctrl structure.
\param[in] train The training data A.
\param[in] participation The assignment of users to clusters.
\param[in] g The vector with the weights of the users
\param[in] prev_model The previous model (used to expedite the learning)
\return model The model learnt.
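    Dimension example (illustrative only): for n = 100 users, m = 50 items
    and 3 clusters, A' is 100 x 200, the regularization block is 200 x 200,
    so A'' is 300 x 200 and each per-column solve returns a w of length
    4m = 200.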
*/
/*****************************************************************************/
gk_csr_t *learn(ctrl_t * ctrl, gk_csr_t * train, int *participation,
double *g, gk_csr_t * prev_model)
{
int i, pos, j, offset, global_nnz;
gk_csr_t *mat, *model = NULL;
global_nnz = train->rowptr[train->nrows] - train->rowptr[0];
mat = gk_csr_Create();
mat->ncols = (ctrl->num_clusters + 1) * train->ncols;
mat->nrows = train->nrows + mat->ncols;
mat->rowptr = gk_zmalloc(mat->nrows + 1, "gk_csr_Dep: local_rowptr");
mat->rowind =
gk_imalloc(2 * global_nnz + mat->ncols, "gk_csr_Dep: local_rowind");
mat->rowval =
gk_fmalloc(2 * global_nnz + mat->ncols, "gk_csr_Dep: local_rowval");
mat->rowptr[0] = 0;
for (i = 0, pos = 0; i < train->nrows; i++) {
/* Copying the original training matrix A */
for (j = train->rowptr[i]; j < train->rowptr[i + 1]; j++) {
mat->rowind[pos] = train->rowind[j];
mat->rowval[pos] = train->rowval[j] * g[i];
pos++;
}
/* Copying the nnzs of the user to the cluster to which he belongs */
offset = (participation[i] + 1) * train->ncols;
for (j = train->rowptr[i]; j < train->rowptr[i + 1]; j++) {
mat->rowind[pos] = train->rowind[j] + offset;
mat->rowval[pos] = train->rowval[j] * (1 - g[i]);
pos++;
}
mat->rowptr[i + 1] = pos;
}
/* Adding the diagonal matrix underneath A' */
/* Global regularization */
for (i = train->nrows; i < train->nrows + train->ncols; i++, pos++) {
mat->rowind[pos] = i - train->nrows;
mat->rowval[pos] = ctrl->beta;
mat->rowptr[i] = pos;
}
/* Local regularization */
for (i = train->nrows + train->ncols; i < mat->nrows; i++, pos++) {
mat->rowind[pos] = i - train->nrows;
mat->rowval[pos] = ctrl->local_beta;
mat->rowptr[i] = pos;
}
mat->rowptr[mat->nrows] = pos;
gk_csr_CreateIndex(mat, GK_CSR_COL);
/* Learning the model */
model = local_learn(ctrl, mat, prev_model);
gk_csr_Free(&mat);
return model;
}
/**************************************************************/
/*! \brief Learning
\details This routine contains the learning algorithm used by GLSLIM and
GLSLIMr0.
\param[in] ctrl A ctrl structure which contains all the parameters
\param[in] train The training data A''
\param[in] prev_model The previous model- used to speed up
learning. It is optional argument.
\return model The model returned.
*/
/**************************************************************/
gk_csr_t *local_learn(ctrl_t * ctrl, gk_csr_t * train,
gk_csr_t * prev_model)
{
int i, nr, nc, ni, pos, j, ii;
int global_nnz;
int basestart, baseend, datasize, step, starti, endi;
gk_csr_t *mat = NULL;
double *bl, *bu, *c;
int *iinds, *jinds;
float *vals;
int *nnzs = NULL;
int *rinds1 = NULL;
int *rinds = NULL;
int *rjinds = NULL;
float *rvals = NULL;
int rank = 0;
int max_nnzs = 0;
double tmr;
int *rrowcnt = NULL;
/* set up timers */
gk_clearwctimer(tmr);
gk_startwctimer(tmr);
/* constants used across all problems */
nr = train->nrows;
nc = train->ncols;
ni = train->ncols / (ctrl->num_clusters + 1);
/* mallocing */
bl = gk_dsmalloc(nc, ctrl->bl, "malloc bl"); /*lower bound */
bu = gk_dsmalloc(nc, ctrl->bu, "malloc bu"); /*upper bound */
c = gk_dmalloc(nc, "malloc c"); /*linear vector */
gk_dset(ni, ctrl->lambda, c);
gk_dset(nc - ni, ctrl->local_lambda, c + ni);
/*starting and ending columns */
basestart = (ctrl->starti >= 0) ? ctrl->starti : 0;
baseend = (ctrl->endi >= 0) ? ctrl->endi : ni;
datasize = baseend - basestart;
step = (datasize / ctrl->num_procs) +
(ctrl->id < (datasize % ctrl->num_procs) ? 1 : 0);
starti = ((datasize / ctrl->num_procs) * ctrl->id) +
gk_min(ctrl->id, datasize % ctrl->num_procs);
endi = starti + step;
if ((endi < datasize) && (ctrl->id == ctrl->num_procs - 1)) {
endi = datasize;
step = datasize - starti;
}
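  /*
   * Example (illustrative): datasize = 10 columns over num_procs = 3 gives
   * ranks 0, 1 and 2 the column ranges [0,4), [4,7) and [7,10) respectively.
   */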
pos = 0;
iinds = gk_ismalloc(step * nc, 0, "malloc iinds");
jinds = gk_ismalloc(step * nc, 0, "malloc jinds");
vals = gk_fsmalloc(step * nc, 0, "malloc vals");
/* go through all columns */
#pragma omp parallel num_threads(ctrl->num_threads)
{
int mypos;
double *w, *b;
wspace_t *myws;
BCLS *ls;
myws = (wspace_t *) gk_malloc(sizeof(wspace_t), "myws");
myws->mat = train;
myws->ncols = ni;
ls = bcls_create_prob(nr, nc);
w = gk_dsmalloc(nc, 0, "malloc w");
b = gk_dsmalloc(nr, 0, "malloc b");
#pragma omp for private(i, j) schedule(dynamic)
for (i = starti; i < endi; i++) {
// this column is totally empty
if (train->colptr[i + 1] - train->colptr[i] == 0) {
continue;
}
/**********************************************************/
/* BCLS learning */
/**********************************************************/
/* get the i-th column from A */
for (j = train->colptr[i]; j < train->colptr[i + 1] - 1; j++) {
ii = train->colind[j];
b[ii] = 1;
}
myws->max_bcls_niters = gk_min(ctrl->max_bcls_niters,
50 * (train->colptr[i + 1] -
train->colptr[i]));
gk_dset(nc, 0, w);
// disable
myws->acol = i;
if (prev_model != NULL) {
get_row(prev_model, i, w);
}
bcsol(ctrl, b, w, myws, bl, bu, 0, c, ls);
for (j = train->colptr[i]; j < train->colptr[i + 1] - 1; j++) {
ii = train->colind[j];
b[ii] = 0;
}
/**********************************************************/
/* dump the data */
/**********************************************************/
/* compute the triplets */
#pragma omp critical
{
for (j = 0; j < nc; j++) {
if (w[j] > EPSILON) {
mypos = pos++;
iinds[mypos] = i;
jinds[mypos] = j;
vals[mypos] = w[j];
}
}
}
} // end of starti - endi
bcls_free_prob(ls);
gk_free((void **) &myws, (void **) &b, (void **) &w, LTERM);
}
gk_stopwctimer(tmr);
/* if(ctrl->id == 0){
printf("time passed is %f\n", gk_getwctimer(tmr));
}*/
/**********************************************************/
/* Combine all the mat of the different processes
to the total_mat with MPI */
/**********************************************************/
if (ctrl->id == 0) {
nnzs = gk_imalloc(ctrl->num_procs, "malloc nnzs");
gk_iset(ctrl->num_procs, 0, nnzs);
}
MPI_Gather(&pos, 1, MPI_INT, nnzs, 1, MPI_INT, 0, MPI_COMM_WORLD);
if (ctrl->id == 0) {
global_nnz = 0;
for (i = 0; i < ctrl->num_procs; i++) {
global_nnz += nnzs[i];
}
}
MPI_Bcast(&global_nnz, 1, MPI_INT, 0, MPI_COMM_WORLD);
/* Finding the max nnzs between all nodes. This covers the case
when a node which is not 0 has a bigger iinds/jinds/vals matrix
than the one in node 0 */
if (ctrl->id == 0) {
max_nnzs = nnzs[0];
for (rank = 1; rank < ctrl->num_procs; rank++)
if (nnzs[rank] > max_nnzs)
max_nnzs = nnzs[rank];
}
/* Each node creates its own row count. This gets sent to node 0, in order
to create the total rowptr. */
if (global_nnz / ctrl->num_procs >= ni) {
rrowcnt = gk_ismalloc(ni, 0, "rrowcnt");
for (i = 0; i < pos; i++) {
rrowcnt[iinds[i]]++;
}
}
/* Every node sends its own iinds, jinds and vals. */
if (ctrl->id != 0) {
if (global_nnz / ctrl->num_procs < ni) {
MPI_Send(iinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
else {
MPI_Send(rrowcnt, ni, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
MPI_Send(iinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(jinds, pos, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(vals, pos, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
}
if (ctrl->id == 0) {
if (global_nnz / ctrl->num_procs < ni) {
rinds1 = gk_icopy(nnzs[0], iinds, gk_imalloc(max_nnzs, "rinds1"));
}
rinds = gk_icopy(nnzs[0], iinds, gk_imalloc(max_nnzs, "rinds"));
rjinds = gk_icopy(nnzs[0], jinds, gk_imalloc(max_nnzs, "rjinds"));
rvals = gk_fcopy(nnzs[0], vals, gk_fmalloc(max_nnzs, "rvals"));
}
gk_free((void **) &iinds, &jinds, &vals, &bl, &bu, &c, LTERM);
/* Allocate and populate matrix */
mat = gk_csr_Create();
mat->nrows = ni;
mat->ncols = nc;
mat->rowptr = gk_zsmalloc(ni + 1, 0, "rowptr");
mat->rowind = gk_imalloc(global_nnz, "rowind");
mat->rowval = gk_fmalloc(global_nnz, "rowval");
if (ctrl->id == 0) {
if (global_nnz / ctrl->num_procs < ni) {
for (rank = 0; rank < ctrl->num_procs; rank++) {
if (rank != 0) {
MPI_Recv(rinds1, nnzs[rank], MPI_INT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for (i = 0; i < nnzs[rank]; i++) {
mat->rowptr[rinds1[i]]++;
}
}
}
else {
for (rank = 0; rank < ctrl->num_procs; rank++) {
if (rank != 0) {
MPI_Recv(rrowcnt, ni, MPI_INT, rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
for (i = 0; i < ni; i++) {
mat->rowptr[i] += rrowcnt[i];
}
}
}
MAKECSR(i, mat->nrows, mat->rowptr);
for (rank = 0; rank < ctrl->num_procs; rank++) {
if (rank != 0) {
MPI_Recv(rinds, nnzs[rank], MPI_INT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(rjinds, nnzs[rank], MPI_INT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(rvals, nnzs[rank], MPI_FLOAT, rank, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for (i = 0; i < nnzs[rank]; i++) {
mat->rowind[mat->rowptr[rinds[i]]] = rjinds[i];
mat->rowval[mat->rowptr[rinds[i]]] = rvals[i];
mat->rowptr[rinds[i]]++;
}
}
SHIFTCSR(i, mat->nrows, mat->rowptr);
gk_free((void **) &rinds, &rjinds, &rvals,
&nnzs, LTERM);
if (global_nnz / ctrl->num_procs < ni) {
gk_free((void **) &rinds1, LTERM);
}
}
if (rrowcnt != NULL) {
gk_free((void **) &rrowcnt, LTERM);
}
/* Broadcast the matrix to the other nodes */
MPI_Bcast(mat->rowptr, ni + 1, MPI_LONG, 0, MPI_COMM_WORLD);
MPI_Bcast(mat->rowind, global_nnz, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(mat->rowval, global_nnz, MPI_FLOAT, 0, MPI_COMM_WORLD);
return mat;
}
|
GB_binop__iseq_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_int32
// A.*B function (eWiseMult): GB_AemultB__iseq_int32
// A*D function (colscale): GB_AxD__iseq_int32
// D*A function (rowscale): GB_DxB__iseq_int32
// C+=B function (dense accum): GB_Cdense_accumB__iseq_int32
// C+=b function (dense accum): GB_Cdense_accumb__iseq_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int32
// C=scalar+B GB_bind1st__iseq_int32
// C=scalar+B' GB_bind1st_tran__iseq_int32
// C=A+scalar GB_bind2nd__iseq_int32
// C=A'+scalar GB_bind2nd_tran__iseq_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij == bij)
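// For example, with x = 3 and Bx = { 3, 5, 3 }, GB_bind1st__iseq_int32
// computes Cx = { 1, 0, 1 } (ISEQ returns the comparison result in the
// operand type, here int32_t).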
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__iseq_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__iseq_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__iseq_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__iseq_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "utilities/time_discretization.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
 * @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is an \f$ n \f$ step, \f$ n \f$ order accurate implicit method.
* This scheme is designed to solve a system of the type:
*\f[
 * \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(u_{n0})}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
*
* If we call:
*
* - Second derivative:
* -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
* - First derivative:
* -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
 * - Variable:
* -# \f$ u_{ni} \f$ the variable at the step i
*
* Then we assume:
 * \f[ \left.\frac{d^2(u_{n0})}{dt^2}\right|_{t_{n0}} = \sum_i c_i \dot{u}_{ni} \f]
 * \f[ \left.\frac{d(u_{n0})}{dt}\right|_{t_{n0}} = \sum_i c_i u_{ni} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
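 * so that, for a constant time step, the BDF2 first-derivative update reads
 * \f[ \dot{u}_{n0} = \frac{1.5\, u_{n0} - 2.0\, u_{n1} + 0.5\, u_{n2}}{dt} \f]
 * (an illustrative expansion of the sum above).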
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + K \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
: public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
typedef typename ImplicitBaseType::TDataType TDataType;
typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
/// Definition of epsilon
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The BDF method
* @param Order The integration order
* @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
*/
explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
:ImplicitBaseType(),
mOrder(Order),
mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order))
{
// Allocate auxiliary memory
const std::size_t num_threads = ParallelUtilities::GetNumThreads();
mVector.dotun0.resize(num_threads);
mVector.dot2un0.resize(num_threads);
// Doing a minimal check
KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;
// We resize the BDF coefficients
if (mBDF.size() != (mOrder + 1))
mBDF.resize(mOrder + 1);
}
/** Copy Constructor.
*/
explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
:ImplicitBaseType(rOther)
,mOrder(rOther.mOrder)
,mBDF(rOther.mBDF)
,mVector(rOther.mVector)
,mpBDFUtility(nullptr)
{
Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder);
mpBDFUtility.swap(auxiliar_pointer);
}
/**
* Clone
*/
BaseTypePointer Clone() override
{
return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
}
/** Destructor.
*/
~ResidualBasedBDFScheme
() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution
* @details Incremental update within newton iteration. It updates the state variables at the end of the time step
* \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Update of displacement (by DOF)
mpDofUpdater->UpdateDofs(rDofSet, rDx);
UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
KRATOS_CATCH( "" );
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step x = xold + vold * Dt
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
KRATOS_ERROR << "Calling base BDF class" << std::endl;
KRATOS_CATCH( "" );
}
/**
 * @brief It initializes the time step solution. Used only if the time step solution is restarted.
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
 * @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce it myself, but the result was very unstable.
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info);
mBDF = r_current_process_info[BDF_COEFFICIENTS];
KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2)
<< "For higher orders than 2 the time step is assumed to be constant.\n";
KRATOS_CATCH( "" );
}
/**
* @brief This function is designed to be called once to perform all the checks needed on the input provided.
* @details Checks can be "expensive" as the function is designed to catch user's errors.
* @param rModelPart The model of the problem to solve
* @return Zero means all ok
*/
int Check(const ModelPart& rModelPart) const override
{
KRATOS_TRY;
const int err = ImplicitBaseType::Check(rModelPart);
if(err!=0) return err;
// Check for minimum value of the buffer index
// Verify buffer size
KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be greater than " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;
KRATOS_CATCH( "" );
return 0;
}
/// Free memory allocated by this class.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "base_bdf_scheme",
"integration_order" : 2
})");
// Getting base class default parameters
const Parameters base_default_parameters = ImplicitBaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBDFScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
struct GeneralVectors
{
std::vector< Vector > dotun0; /// First derivative
std::vector< Vector > dot2un0; /// Second derivative
};
const std::size_t mOrder; /// The integration order
Vector mBDF; /// The BDF coefficients
GeneralVectors mVector; /// The structure containing the derivatives
Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Performing the update of the derivatives
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
inline void UpdateDerivatives(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
)
{
// Updating time derivatives (nodally for efficiency)
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
// Getting first node iterator
const auto it_node_begin = rModelPart.Nodes().begin();
#pragma omp parallel for
for(int i = 0; i< num_nodes; ++i) {
auto it_node = it_node_begin + i;
UpdateFirstDerivative(it_node);
UpdateSecondDerivative(it_node);
}
}
/**
* @brief Updating first time derivative (velocity)
 * @param itNode the node iterator
*/
virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief Updating second time derivative (acceleration)
 * @param itNode the node iterator
*/
virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
// Adding mass contribution to the dynamic stiffness
if (rM.size1() != 0) { // if M matrix declared
noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
}
// Adding damping contribution
if (rD.size1() != 0) { // if D matrix declared
noalias(rLHS_Contribution) += rD * mBDF[0];
}
}
/**
* @brief It adds the dynamic RHS contribution of the objects
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rObject The object to compute
* @param rRHS_Contribution The dynamic contribution for the RHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
template <class TObjectType>
void TemplateAddDynamicsToRHS(
TObjectType& rObject,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
)
{
const std::size_t this_thread = OpenMPUtils::ThisThread();
const auto& r_const_obj_ref = rObject;
// Adding inertia contribution
if (rM.size1() != 0) {
r_const_obj_ref.GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
}
// Adding damping contribution
if (rD.size1() != 0) {
r_const_obj_ref.GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
}
}
/**
* @brief It adds the dynamic RHS contribution of the elements
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Element& rElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Element>(rElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
/**
* @brief It adds the dynamic RHS contribution of the condition
 * \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \ddot{u}_{n0} - \mathbf{D} \dot{u}_{n0} - \mathbf{K} u_{n0} \f]
* @param rCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Condition& rCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Condition>(rCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Utility class to perform the update after solving the system, will be different in MPI runs.
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
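/*
 * For reference, each time step of the tiled CLooG code below computes the
 * following untiled update (a sketch; i, j, k run over interior points):
 *
 *   A[(t+1)%2][i][j][k] =
 *       coef[0][i][j][k] * A[t%2][i][j][k]
 *     + coef[1][i][j][k] * A[t%2][i-1][j][k]
 *     + coef[2][i][j][k] * A[t%2][i][j-1][k]
 *     + coef[3][i][j][k] * A[t%2][i][j][k-1]
 *     + coef[4][i][j][k] * A[t%2][i+1][j][k]
 *     + coef[5][i][j][k] * A[t%2][i][j+1][k]
 *     + coef[6][i][j][k] * A[t%2][i][j][k+1];
 */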
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
  if (argc < 5) {
    /* Nx, Ny, Nz and Nt would otherwise be used uninitialized below */
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(8*t3+Nx+4,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),128*t4+126),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
                  A[(t5+1)%2][-t5+t6][-t5+t7][-t5+t8] =
                      coef[0][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7][-t5+t8]
                    + coef[1][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6-1][-t5+t7][-t5+t8]
                    + coef[2][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7-1][-t5+t8]
                    + coef[3][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7][-t5+t8-1]
                    + coef[4][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6+1][-t5+t7][-t5+t8]
                    + coef[5][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7+1][-t5+t8]
                    + coef[6][-t5+t6][-t5+t7][-t5+t8] * A[t5%2][-t5+t6][-t5+t7][-t5+t8+1];
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
bdf2_turbulent_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#if !defined(KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED )
#define KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "solving_strategies/schemes/scheme.h"
#include "includes/define.h"
// #include "includes/serializer.h"
#include "includes/dof.h"
#include "processes/process.h"
#include "containers/pointer_vector_set.h"
#include "utilities/coordinate_transformation_utilities.h"
// Application includes
#include "fluid_dynamics_application_variables.h"
namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// A scheme for BDF2 time integration.
/**
 * Second order backward differentiation formula (BDF2) time scheme for the
 * fluid problem, with optional support for a turbulence model (implemented
 * as a Kratos::Process) and for periodic boundary conditions.
 */
template<class TSparseSpace,class TDenseSpace>
class BDF2TurbulentScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BDF2TurbulentScheme
KRATOS_CLASS_POINTER_DEFINITION(BDF2TurbulentScheme);
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename TSparseSpace::DataType TDataType;
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
typedef typename TDenseSpace::VectorType LocalSystemVectorType;
typedef Dof<TDataType> TDofType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef CoordinateTransformationUtils<LocalSystemMatrixType, LocalSystemVectorType, double> RotationToolType;
typedef typename RotationToolType::UniquePointer RotationToolPointerType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
BDF2TurbulentScheme()
: Scheme<TSparseSpace, TDenseSpace>()
, mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{}
/// Constructor to use the formulation combined with a turbulence model.
/**
* The turbulence model is assumed to be implemented as a Kratos::Process.
* The model's Execute() method will be called at the start of each
* non-linear iteration.
* @param pTurbulenceModel pointer to the turbulence model
*/
BDF2TurbulentScheme(Process::Pointer pTurbulenceModel)
: Scheme<TSparseSpace, TDenseSpace>()
, mpTurbulenceModel(pTurbulenceModel)
, mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
{}
/// Constructor for periodic boundary conditions.
/**
* @param rPeriodicVar the variable used to store periodic pair indices.
*/
BDF2TurbulentScheme(const Kratos::Variable<int>& rPeriodicVar)
: Scheme<TSparseSpace, TDenseSpace>()
, mrPeriodicIdVar(rPeriodicVar)
{}
/// Destructor.
~BDF2TurbulentScheme() override
{}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Check input data for errors.
/**
* @param rModelPart The fluid's ModelPart
* @return 0 if no errors were found
*/
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
// Base scheme check
int error_code = BaseType::Check(rModelPart);
if (error_code != 0) {
return error_code;
}
// Check buffer size
KRATOS_ERROR_IF(rModelPart.GetBufferSize() < 3)
<< "Insufficient buffer size for BDF2, should be at least 3, got " << rModelPart.GetBufferSize() << std::endl;
return 0;
KRATOS_CATCH("");
}
void Initialize(ModelPart& rModelPart) override
{
// Set up the rotation tool pointer
const auto& r_process_info = rModelPart.GetProcessInfo();
const unsigned int domain_size = r_process_info[DOMAIN_SIZE];
auto p_aux = Kratos::make_unique<RotationToolType>(domain_size, domain_size + 1, SLIP);
mpRotationTool.swap(p_aux);
// Base initialize call
BaseType::Initialize(rModelPart);
}
/// Set the time iteration coefficients
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
// Base function initializes elements and conditions
BaseType::InitializeSolutionStep(rModelPart,A,Dx,b);
// Recalculate mesh velocity (to account for variable time step)
const double tol = 1.0e-12;
const double Dt = rModelPart.GetProcessInfo()[DELTA_TIME];
const double OldDt = rModelPart.GetProcessInfo().GetPreviousSolutionStepInfo(1)[DELTA_TIME];
if(std::abs(Dt - OldDt) > tol) {
const int n_nodes = rModelPart.NumberOfNodes();
const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];
#pragma omp parallel for
for(int i_node = 0; i_node < n_nodes; ++i_node) {
auto it_node = rModelPart.NodesBegin() + i_node;
auto& rMeshVel = it_node->FastGetSolutionStepValue(MESH_VELOCITY);
const auto& rDisp0 = it_node->FastGetSolutionStepValue(DISPLACEMENT);
const auto& rDisp1 = it_node->FastGetSolutionStepValue(DISPLACEMENT,1);
const auto& rDisp2 = it_node->FastGetSolutionStepValue(DISPLACEMENT,2);
rMeshVel = BDFcoefs[0] * rDisp0 + BDFcoefs[1] * rDisp1 + BDFcoefs[2] * rDisp2;
}
}
}
void InitializeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
if (mpTurbulenceModel != nullptr) mpTurbulenceModel->Execute();
KRATOS_CATCH("")
}
void FinalizeNonLinIteration(
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//if orthogonal subscales are computed
if (CurrentProcessInfo[OSS_SWITCH] == 1.0)
{
this->LumpedProjection(rModelPart);
//this->FullProjection(rModelPart);
}
}
/// Start the iteration by providing a first approximation to the solution.
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
const int n_nodes = rModelPart.NumberOfNodes();
const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];
#pragma omp parallel for
for(int i_node = 0; i_node < n_nodes; ++i_node) {
auto it_node = rModelPart.NodesBegin() + i_node;
auto& rVel0 = it_node->FastGetSolutionStepValue(VELOCITY);
const auto& rVel1 = it_node->FastGetSolutionStepValue(VELOCITY,1);
const auto& rVel2 = it_node->FastGetSolutionStepValue(VELOCITY,2);
auto& rAcceleration = it_node->FastGetSolutionStepValue(ACCELERATION);
// Predict velocities
if(!it_node->IsFixed(VELOCITY_X))
rVel0[0] = 2.00 * rVel1[0] - rVel2[0];
if(!it_node->IsFixed(VELOCITY_Y))
rVel0[1] = 2.00 * rVel1[1] - rVel2[1];
if(!it_node->IsFixed(VELOCITY_Z))
rVel0[2] = 2.00 * rVel1[2] - rVel2[2];
// Predict acceleration
rAcceleration = BDFcoefs[0] * rVel0 + BDFcoefs[1] * rVel1 + BDFcoefs[2] * rVel2;
}
KRATOS_CATCH("")
}
/// Store the iteration results as solution step variables and update acceleration after a Newton-Raphson iteration.
/**
* @param rModelPart fluid ModelPart
* @param rDofSet DofSet containing the Newton-Raphson system degrees of freedom.
* @param A Newton-Raphson system matrix (unused)
* @param Dx Newton-Raphson iteration solution
* @param b Newton-Raphson right hand side (unused)
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
mpRotationTool->RotateVelocities(rModelPart);
mpDofUpdater->UpdateDofs(rDofSet,Dx);
mpRotationTool->RecoverVelocities(rModelPart);
const Vector& BDFCoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];
this->UpdateAcceleration(rModelPart,BDFCoefs);
KRATOS_CATCH("")
}
void CalculateSystemContributions(
Element& rCurrentElement,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY
LocalSystemMatrixType Mass;
LocalSystemMatrixType Damp;
// Initialize element
rCurrentElement.InitializeNonLinearIteration(rCurrentProcessInfo);
// Get Equation Id
rCurrentElement.EquationIdVector(rEquationId,rCurrentProcessInfo);
// Get matrix contributions
rCurrentElement.CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
rCurrentElement.CalculateMassMatrix(Mass,rCurrentProcessInfo);
rCurrentElement.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);
// Add the dynamic contributions to the local system using BDF2 coefficients
this->CombineLHSContributions(LHS_Contribution,Mass,Damp,rCurrentProcessInfo);
this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement,RHS_Contribution,Mass,rCurrentProcessInfo);
// Apply slip condition
mpRotationTool->Rotate(LHS_Contribution, RHS_Contribution, rCurrentElement.GetGeometry());
mpRotationTool->ApplySlipCondition(LHS_Contribution, RHS_Contribution, rCurrentElement.GetGeometry());
KRATOS_CATCH("")
}
void CalculateRHSContribution(
Element& rCurrentElement,
LocalSystemVectorType &RHS_Contribution,
Element::EquationIdVectorType &rEquationId,
const ProcessInfo &rCurrentProcessInfo) override
{
KRATOS_TRY
LocalSystemMatrixType Mass;
LocalSystemMatrixType Damp;
// Initialize element
rCurrentElement.InitializeNonLinearIteration(rCurrentProcessInfo);
// Get Equation Id
rCurrentElement.EquationIdVector(rEquationId,rCurrentProcessInfo);
// Get matrix contributions
rCurrentElement.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
rCurrentElement.CalculateMassMatrix(Mass,rCurrentProcessInfo);
rCurrentElement.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);
// Add the dynamic contributions to the local system using BDF2 coefficients
this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement,RHS_Contribution,Mass,rCurrentProcessInfo);
// Apply slip condition
mpRotationTool->Rotate(RHS_Contribution, rCurrentElement.GetGeometry());
mpRotationTool->ApplySlipCondition(RHS_Contribution, rCurrentElement.GetGeometry());
KRATOS_CATCH("")
}
void CalculateSystemContributions(
Condition& rCurrentCondition,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY
LocalSystemMatrixType Mass;
LocalSystemMatrixType Damp;
// Initialize element
rCurrentCondition.InitializeNonLinearIteration(rCurrentProcessInfo);
// Get Equation Id
rCurrentCondition.EquationIdVector(rEquationId,rCurrentProcessInfo);
// Get matrix contributions
rCurrentCondition.CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
rCurrentCondition.CalculateMassMatrix(Mass,rCurrentProcessInfo);
rCurrentCondition.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);
// Add the dynamic contributions to the local system using BDF2 coefficients
this->CombineLHSContributions(LHS_Contribution,Mass,Damp,rCurrentProcessInfo);
this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition,RHS_Contribution,Mass,rCurrentProcessInfo);
// Apply slip condition
mpRotationTool->Rotate(LHS_Contribution, RHS_Contribution, rCurrentCondition.GetGeometry());
mpRotationTool->ApplySlipCondition(LHS_Contribution, RHS_Contribution, rCurrentCondition.GetGeometry());
KRATOS_CATCH("")
}
void CalculateRHSContribution(
Condition &rCurrentCondition,
LocalSystemVectorType &RHS_Contribution,
Element::EquationIdVectorType &rEquationId,
const ProcessInfo &rCurrentProcessInfo) override
{
KRATOS_TRY
LocalSystemMatrixType Mass;
LocalSystemMatrixType Damp;
// Initialize element
rCurrentCondition.InitializeNonLinearIteration(rCurrentProcessInfo);
// Get Equation Id
rCurrentCondition.EquationIdVector(rEquationId,rCurrentProcessInfo);
// Get matrix contributions
rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
rCurrentCondition.CalculateMassMatrix(Mass,rCurrentProcessInfo);
rCurrentCondition.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);
// Add the dynamic contributions to the local system using BDF2 coefficients
this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition,RHS_Contribution,Mass,rCurrentProcessInfo);
// Apply slip condition
mpRotationTool->Rotate(RHS_Contribution, rCurrentCondition.GetGeometry());
mpRotationTool->ApplySlipCondition(RHS_Contribution, rCurrentCondition.GetGeometry());
KRATOS_CATCH("")
}
/// Free memory allocated by this object.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "BDF2TurbulentScheme";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo)
{
KRATOS_TRY;
//calculate the BDF coefficients
double Dt = rCurrentProcessInfo[DELTA_TIME];
double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
double Rho = OldDt / Dt;
double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3, false);
BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
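// Sanity check: for a constant time step OldDt == Dt, so Rho = 1 and
// TimeCoeff = 1/(2*Dt), recovering the classical BDF2 coefficients
// 3/(2*Dt), -4/(2*Dt) and 1/(2*Dt) quoted in the comments above.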
KRATOS_CATCH("");
}
/// Update Dof values after a Newton-Raphson iteration.
/**
* @param rDofSet Container for the Degrees of freedom in the system
* @param Dx Solution of the linear system
*/
virtual void UpdateDofs(
DofsArrayType& rDofSet,
TSystemVectorType& Dx)
{
KRATOS_TRY
const int n_dof = rDofSet.size();
#pragma omp parallel for
for (int i_dof = 0; i_dof < n_dof; ++i_dof) {
auto it_dof = rDofSet.begin() + i_dof;
if (it_dof->IsFree()) {
it_dof->GetSolutionStepValue() += TSparseSpace::GetValue(Dx, it_dof->EquationId());
}
}
KRATOS_CATCH("")
}
/// Update nodal acceleration values after a Newton-Raphson iteration
/**
* @param rModelPart fluid ModelPart
* @param rBDFcoefs Time stepping coefficients for this iteration.
*/
void UpdateAcceleration(
ModelPart& rModelPart,
const Vector& rBDFcoefs)
{
KRATOS_TRY
const double Coef0 = rBDFcoefs[0];
const double Coef1 = rBDFcoefs[1];
const double Coef2 = rBDFcoefs[2];
const int n_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; ++i_node) {
auto it_node = rModelPart.NodesBegin() + i_node;
const auto& rVel0 = it_node->FastGetSolutionStepValue(VELOCITY);
const auto& rVel1 = it_node->FastGetSolutionStepValue(VELOCITY,1);
const auto& rVel2 = it_node->FastGetSolutionStepValue(VELOCITY,2);
auto& rAcceleration = it_node->FastGetSolutionStepValue(ACCELERATION);
rAcceleration = Coef0 * rVel0 + Coef1 * rVel1 + Coef2 * rVel2;
}
KRATOS_CATCH("")
}
void CombineLHSContributions(
LocalSystemMatrixType& rLHS,
LocalSystemMatrixType& rMass,
LocalSystemMatrixType& rDamp,
const ProcessInfo& rCurrentProcessInfo)
{
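// LHS = Damp + c0*Mass, with c0 = BDF_COEFFICIENTS[0] the coefficient of
// the current step: the mass term is the Jacobian of the BDF2 time
// derivative, while the damp matrix carries the velocity contributions.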
const double Coef0 = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS)[0];
if (rMass.size1() != 0) noalias(rLHS) += Coef0 * rMass;
if (rDamp.size1() != 0) noalias(rLHS) += rDamp;
}
template<class TObject>
void AddDynamicRHSContribution(
TObject& rObject,
LocalSystemVectorType& rRHS,
LocalSystemMatrixType& rMass,
const ProcessInfo& rCurrentProcessInfo)
{
if (rMass.size1() != 0)
{
const Vector& rCoefs = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS);
const auto& r_const_obj_ref = rObject;
LocalSystemVectorType Acc;
r_const_obj_ref.GetFirstDerivativesVector(Acc);
Acc *= rCoefs[0];
for(unsigned int n = 1; n < 3; ++n)
{
LocalSystemVectorType rVel;
r_const_obj_ref.GetFirstDerivativesVector(rVel,n);
noalias(Acc) += rCoefs[n] * rVel;
}
noalias(rRHS) -= prod(rMass,Acc);
}
}
void FullProjection(ModelPart& rModelPart)
{
const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// Initialize containers
const int n_nodes = rModelPart.NumberOfNodes();
const int n_elems = rModelPart.NumberOfElements();
const array_1d<double,3> zero_vect = ZeroVector(3);
#pragma omp parallel for firstprivate(zero_vect)
for (int i_node = 0; i_node < n_nodes; ++i_node) {
auto ind = rModelPart.NodesBegin() + i_node;
noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = zero_vect; // "x"
ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; // "x"
ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // "Ml"
}
// Newton-Raphson parameters
const double RelTol = 1e-4 * rModelPart.NumberOfNodes();
const double AbsTol = 1e-6 * rModelPart.NumberOfNodes();
const unsigned int MaxIter = 100;
// iteration variables
unsigned int iter = 0;
array_1d<double,3> dMomProj = zero_vect;
double dMassProj = 0.0;
double RelMomErr = 1000.0 * RelTol;
double RelMassErr = 1000.0 * RelTol;
double AbsMomErr = 1000.0 * AbsTol;
double AbsMassErr = 1000.0 * AbsTol;
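// Iteratively solve the consistent-mass projection system M x = b using
// the lumped mass matrix Ml as preconditioner: each pass recomputes the
// residual b - Mc x element-by-element and applies the correction
// Ml dx = b - Mc x (see the per-node loop below) until the momentum and
// mass residuals satisfy the relative or absolute tolerances.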
while( ( (AbsMomErr > AbsTol && RelMomErr > RelTol) || (AbsMassErr > AbsTol && RelMassErr > RelTol) ) && iter < MaxIter)
{
// Reinitialize RHS
#pragma omp parallel for firstprivate(zero_vect)
for (int i_node = 0; i_node < n_nodes; ++i_node)
{
auto ind = rModelPart.NodesBegin() + i_node;
noalias(ind->GetValue(ADVPROJ)) = zero_vect; // "b"
ind->GetValue(DIVPROJ) = 0.0; // "b"
ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // Reset because Calculate will overwrite it
}
// Reinitialize errors
RelMomErr = 0.0;
RelMassErr = 0.0;
AbsMomErr = 0.0;
AbsMassErr = 0.0;
// Compute new values
array_1d<double, 3 > output;
#pragma omp parallel for private(output)
for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
auto it_elem = rModelPart.ElementsBegin() + i_elem;
it_elem->Calculate(SUBSCALE_VELOCITY, output, rCurrentProcessInfo);
}
rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
// Update iteration variables
#pragma omp parallel for private(dMomProj,dMassProj) reduction(+:RelMomErr,RelMassErr,AbsMomErr,AbsMassErr)
for (int i_node = 0; i_node < n_nodes; ++i_node) {
auto ind = rModelPart.NodesBegin() + i_node;
const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); // Ml dx = b - Mc x
dMomProj = ind->GetValue(ADVPROJ) / Area;
dMassProj = ind->GetValue(DIVPROJ) / Area;
RelMomErr += sqrt( dMomProj[0]*dMomProj[0] + dMomProj[1]*dMomProj[1] + dMomProj[2]*dMomProj[2]);
RelMassErr += fabs(dMassProj);
auto& rMomRHS = ind->FastGetSolutionStepValue(ADVPROJ);
double& rMassRHS = ind->FastGetSolutionStepValue(DIVPROJ);
rMomRHS += dMomProj;
rMassRHS += dMassProj;
AbsMomErr += sqrt( rMomRHS[0]*rMomRHS[0] + rMomRHS[1]*rMomRHS[1] + rMomRHS[2]*rMomRHS[2]);
AbsMassErr += fabs(rMassRHS);
}
if(AbsMomErr > 1e-10)
RelMomErr /= AbsMomErr;
else // If residual is close to zero, force absolute convergence to avoid division by zero errors
RelMomErr = 1000.0;
if(AbsMassErr > 1e-10)
RelMassErr /= AbsMassErr;
else
RelMassErr = 1000.0;
iter++;
}
KRATOS_INFO("BDF2TurbulentScheme") << "Performed OSS Projection in " << iter << " iterations" << std::endl;
}
void LumpedProjection(ModelPart& rModelPart)
{
const int n_nodes = rModelPart.NumberOfNodes();
const int n_elems = rModelPart.NumberOfElements();
const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
const array_1d<double,3> zero_vect = ZeroVector(3);
#pragma omp parallel for firstprivate(zero_vect)
for (int i_node = 0; i_node < n_nodes; ++i_node) {
auto itNode = rModelPart.NodesBegin() + i_node;
noalias(itNode->FastGetSolutionStepValue(ADVPROJ)) = zero_vect;
itNode->FastGetSolutionStepValue(DIVPROJ) = 0.0;
itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
}
array_1d<double, 3 > Out;
#pragma omp parallel for private(Out)
for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
auto itElem = rModelPart.ElementsBegin() + i_elem;
itElem->Calculate(ADVPROJ, Out, rCurrentProcessInfo);
}
rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
// Correction for periodic conditions
if (mrPeriodicIdVar.Key() != 0) {
this->PeriodicConditionProjectionCorrection(rModelPart);
}
const double zero_tol = 1.0e-12;
#pragma omp parallel for firstprivate(zero_tol)
for (int i_node = 0; i_node < n_nodes; ++i_node){
auto iNode = rModelPart.NodesBegin() + i_node;
if (iNode->FastGetSolutionStepValue(NODAL_AREA) < zero_tol) {
iNode->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
}
const double Area = iNode->FastGetSolutionStepValue(NODAL_AREA);
iNode->FastGetSolutionStepValue(ADVPROJ) /= Area;
iNode->FastGetSolutionStepValue(DIVPROJ) /= Area;
}
KRATOS_INFO("BDF2TurbulentScheme") << "Computing OSS projections" << std::endl;
}
/** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
* both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
* 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
* 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n
* 3- The value on all periodic nodes is replaced by the one received in step 2.
*/
void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
{
const int num_nodes = rModelPart.NumberOfNodes();
const int num_conditions = rModelPart.NumberOfConditions();
#pragma omp parallel for
for (int i = 0; i < num_nodes; i++) {
auto it_node = rModelPart.NodesBegin() + i;
it_node->SetValue(NODAL_AREA,0.0);
it_node->SetValue(ADVPROJ,ZeroVector(3));
it_node->SetValue(DIVPROJ,0.0);
}
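// Step 1: on the partition that owns each PeriodicCondition, accumulate
// the values of both periodic nodes into their non-historical containers.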
#pragma omp parallel for
for (int i = 0; i < num_conditions; i++) {
auto it_cond = rModelPart.ConditionsBegin() + i;
if(it_cond->Is(PERIODIC)) {
this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry());
}
}
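// Step 2: add the non-historical containers across processes, so every
// partition receives the value assembled by the condition owner.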
rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
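// Step 3: replace the value on all periodic nodes by the one assembled in
// step 2.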
#pragma omp parallel for
for (int i = 0; i < num_nodes; i++) {
auto it_node = rModelPart.NodesBegin() + i;
this->CorrectContributionsOnPeriodicNode(*it_node);
}
}
void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry)
{
unsigned int nodes_in_cond = rGeometry.PointsNumber();
double nodal_area = 0.0;
array_1d<double,3> momentum_projection = ZeroVector(3);
double mass_projection = 0.0;
for ( unsigned int i = 0; i < nodes_in_cond; i++ )
{
auto& r_node = rGeometry[i];
nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA);
noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ);
mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ);
}
for ( unsigned int i = 0; i < nodes_in_cond; i++ )
{
auto& r_node = rGeometry[i];
/* Note that this loop is expected to be threadsafe in normal conditions,
* since each node should belong to a single periodic link. However, I am
* setting the locks for openmp in case that we try more complicated things
* in the future (like having different periodic conditions for different
* coordinate directions).
*/
r_node.SetLock();
r_node.GetValue(NODAL_AREA) = nodal_area;
noalias(r_node.GetValue(ADVPROJ)) = momentum_projection;
r_node.GetValue(DIVPROJ) = mass_projection;
r_node.UnSetLock();
}
}
void CorrectContributionsOnPeriodicNode(Node<3>& rNode)
{
//TODO: This needs to be done in another manner as soon as we start using non-historical NODAL_AREA
if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set.
{
rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA);
noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Pointer to a turbulence model
Process::Pointer mpTurbulenceModel = nullptr;
RotationToolPointerType mpRotationTool = nullptr;
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
const Kratos::Variable<int>& mrPeriodicIdVar;
// ///@}
// ///@name Serialization
// ///@{
//
// friend class Serializer;
//
// virtual void save(Serializer& rSerializer) const
// {
// KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, BaseType );
// rSerializer.save("mpTurbulenceModel",mpTurbulenceModel);
// }
//
// virtual void load(Serializer& rSerializer)
// {
// KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, BaseType );
// rSerializer.load("mpTurbulenceModel",mpTurbulenceModel);
// }
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (deleted).
BDF2TurbulentScheme & operator=(BDF2TurbulentScheme const& rOther) = delete;
/// Copy constructor (deleted).
BDF2TurbulentScheme(BDF2TurbulentScheme const& rOther) = delete;
///@}
}; // Class BDF2TurbulentScheme
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TSparseSpace,class TDenseSpace>
inline std::istream& operator >>(std::istream& rIStream,BDF2TurbulentScheme<TSparseSpace,TDenseSpace>& rThis)
{
return rIStream;
}
/// output stream function
template<class TSparseSpace,class TDenseSpace>
inline std::ostream& operator <<(std::ostream& rOStream,const BDF2TurbulentScheme<TSparseSpace,TDenseSpace>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED defined
|
convolutiondepthwise_5x5_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
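// pack8 kernel layout: each group stores its 5x5 taps as 25 blocks of 8
// floats (one per packed channel), i.e. 40 floats per kernel row; k0
// advances by 40 after each of the first four rows and rewinds by 160
// after the fifth, so every output pixel starts again at tap (0,0).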
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k03 = _mm256_loadu_ps(k0 + 24);
__m256 _k04 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _k10 = _mm256_loadu_ps(k0);
__m256 _k11 = _mm256_loadu_ps(k0 + 8);
__m256 _k12 = _mm256_loadu_ps(k0 + 16);
__m256 _k13 = _mm256_loadu_ps(k0 + 24);
__m256 _k14 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
__m256 _k20 = _mm256_loadu_ps(k0);
__m256 _k21 = _mm256_loadu_ps(k0 + 8);
__m256 _k22 = _mm256_loadu_ps(k0 + 16);
__m256 _k23 = _mm256_loadu_ps(k0 + 24);
__m256 _k24 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm256_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm256_fmadd_ps(_k24, _r24, _sum0);
__m256 _r30 = _mm256_loadu_ps(r3);
__m256 _r31 = _mm256_loadu_ps(r3 + 8);
__m256 _r32 = _mm256_loadu_ps(r3 + 16);
__m256 _r33 = _mm256_loadu_ps(r3 + 24);
__m256 _r34 = _mm256_loadu_ps(r3 + 32);
__m256 _k30 = _mm256_loadu_ps(k0);
__m256 _k31 = _mm256_loadu_ps(k0 + 8);
__m256 _k32 = _mm256_loadu_ps(k0 + 16);
__m256 _k33 = _mm256_loadu_ps(k0 + 24);
__m256 _k34 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm256_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm256_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm256_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm256_fmadd_ps(_k34, _r34, _sum0);
__m256 _r40 = _mm256_loadu_ps(r4);
__m256 _r41 = _mm256_loadu_ps(r4 + 8);
__m256 _r42 = _mm256_loadu_ps(r4 + 16);
__m256 _r43 = _mm256_loadu_ps(r4 + 24);
__m256 _r44 = _mm256_loadu_ps(r4 + 32);
__m256 _k40 = _mm256_loadu_ps(k0);
__m256 _k41 = _mm256_loadu_ps(k0 + 8);
__m256 _k42 = _mm256_loadu_ps(k0 + 16);
__m256 _k43 = _mm256_loadu_ps(k0 + 24);
__m256 _k44 = _mm256_loadu_ps(k0 + 32);
k0 -= 160;
_sum0 = _mm256_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm256_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm256_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm256_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm256_fmadd_ps(_k44, _r44, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
outptr0 += 8;
}
r0 += 4 * 8;
r1 += 4 * 8;
r2 += 4 * 8;
r3 += 4 * 8;
r4 += 4 * 8;
}
}
}
static void convdw5x5s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
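// With stride 2 the inner loop consumes 2*outw packed pixels per output
// row, so tailstep skips the rest of the current input row plus one full
// extra row: (w - 2*outw + w) pixels of 8 floats each.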
const int tailstep = (w - 2 * outw + w) * 8;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k03 = _mm256_loadu_ps(k0 + 24);
__m256 _k04 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k03, _r03, _sum0);
_sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _k10 = _mm256_loadu_ps(k0);
__m256 _k11 = _mm256_loadu_ps(k0 + 8);
__m256 _k12 = _mm256_loadu_ps(k0 + 16);
__m256 _k13 = _mm256_loadu_ps(k0 + 24);
__m256 _k14 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k13, _r13, _sum0);
_sum0 = _mm256_fmadd_ps(_k14, _r14, _sum0);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
__m256 _k20 = _mm256_loadu_ps(k0);
__m256 _k21 = _mm256_loadu_ps(k0 + 8);
__m256 _k22 = _mm256_loadu_ps(k0 + 16);
__m256 _k23 = _mm256_loadu_ps(k0 + 24);
__m256 _k24 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
_sum0 = _mm256_fmadd_ps(_k23, _r23, _sum0);
_sum0 = _mm256_fmadd_ps(_k24, _r24, _sum0);
__m256 _r30 = _mm256_loadu_ps(r3);
__m256 _r31 = _mm256_loadu_ps(r3 + 8);
__m256 _r32 = _mm256_loadu_ps(r3 + 16);
__m256 _r33 = _mm256_loadu_ps(r3 + 24);
__m256 _r34 = _mm256_loadu_ps(r3 + 32);
__m256 _k30 = _mm256_loadu_ps(k0);
__m256 _k31 = _mm256_loadu_ps(k0 + 8);
__m256 _k32 = _mm256_loadu_ps(k0 + 16);
__m256 _k33 = _mm256_loadu_ps(k0 + 24);
__m256 _k34 = _mm256_loadu_ps(k0 + 32);
k0 += 40;
_sum0 = _mm256_fmadd_ps(_k30, _r30, _sum0);
_sum0 = _mm256_fmadd_ps(_k31, _r31, _sum0);
_sum0 = _mm256_fmadd_ps(_k32, _r32, _sum0);
_sum0 = _mm256_fmadd_ps(_k33, _r33, _sum0);
_sum0 = _mm256_fmadd_ps(_k34, _r34, _sum0);
__m256 _r40 = _mm256_loadu_ps(r4);
__m256 _r41 = _mm256_loadu_ps(r4 + 8);
__m256 _r42 = _mm256_loadu_ps(r4 + 16);
__m256 _r43 = _mm256_loadu_ps(r4 + 24);
__m256 _r44 = _mm256_loadu_ps(r4 + 32);
__m256 _k40 = _mm256_loadu_ps(k0);
__m256 _k41 = _mm256_loadu_ps(k0 + 8);
__m256 _k42 = _mm256_loadu_ps(k0 + 16);
__m256 _k43 = _mm256_loadu_ps(k0 + 24);
__m256 _k44 = _mm256_loadu_ps(k0 + 32);
k0 -= 160;
_sum0 = _mm256_fmadd_ps(_k40, _r40, _sum0);
_sum0 = _mm256_fmadd_ps(_k41, _r41, _sum0);
_sum0 = _mm256_fmadd_ps(_k42, _r42, _sum0);
_sum0 = _mm256_fmadd_ps(_k43, _r43, _sum0);
_sum0 = _mm256_fmadd_ps(_k44, _r44, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
outptr0 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
/*
Typedef declarations.
*/
struct _PixelView
{
size_t
id;
char
name[MaxTextExtent];
ExceptionInfo
*exception;
MagickWand
*wand;
CacheView
*view;
RectangleInfo
region;
size_t
number_threads;
PixelWand
***pixel_wands;
MagickBooleanType
debug;
size_t
signature;
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w A l l o c a t e W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAllocateWand() allocates an initial drawing wand which is an opaque
% handle required by the remaining drawing methods.
%
% The format of the DrawAllocateWand method is:
%
% DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: Initial drawing defaults. Set to NULL to use defaults.
%
% o image: the image to draw on.
%
*/
WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image)
{
return(AcquireDrawingWand(draw_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() averages a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
Image *images)
{
MagickWand
*clone_wand;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
if (clone_wand == (MagickWand *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
images->filename);
(void) memset(clone_wand,0,sizeof(*clone_wand));
clone_wand->id=AcquireWandId();
(void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
MagickWandId,(double) clone_wand->id);
clone_wand->exception=AcquireExceptionInfo();
InheritException(clone_wand->exception,wand->exception);
clone_wand->image_info=CloneImageInfo(wand->image_info);
clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
clone_wand->images=images;
clone_wand->debug=IsEventLogging();
if (clone_wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
clone_wand->signature=WandSignature;
return(clone_wand);
}
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
Image
*average_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
wand->exception);
if (average_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,average_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
PixelView
*clone_view;
register ssize_t
i;
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
if (pixel_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
(void) memset(clone_view,0,sizeof(*clone_view));
clone_view->id=AcquireWandId();
(void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) clone_view->id);
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,pixel_view->exception);
clone_view->view=CloneCacheView(pixel_view->view);
clone_view->region=pixel_view->region;
clone_view->wand=pixel_view->wand;
clone_view->number_threads=pixel_view->number_threads;
clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
if (clone_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
pixel_view->pixel_wands[i],pixel_view->region.width);
clone_view->debug=pixel_view->debug;
if (clone_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
clone_view->signature=WandSignature;
return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
const size_t number_wands,const size_t number_threads)
{
register ssize_t
i;
assert(pixel_wands != (PixelWand ***) NULL);
for (i=0; i < (ssize_t) number_threads; i++)
if (pixel_wands[i] != (PixelWand **) NULL)
pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
return(pixel_wands);
}
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
pixel_view->region.width,pixel_view->number_threads);
pixel_view->view=DestroyCacheView(pixel_view->view);
pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
pixel_view->signature=(~WandSignature);
RelinquishWandId(pixel_view->id);
pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel regions are not confined to the image canvas-- that
% is, you can include negative offsets or widths or heights that exceed the
% image dimension. However, the destination pixel view is confined to the
% image canvas-- that is, no negative offsets or widths or heights that
% exceed the image dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
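%  For example, this transfer callback copies each source pixel to the
%  destination (a sketch: it assumes the DuplexTransferPixelViewMethod
%  signature matches the call made by this iterator, and uses only the view
%  accessors declared in this file):
%
%    MagickBooleanType CopySourcePixels(const PixelView *source,
%      const PixelView *duplex,PixelView *destination,void *context)
%    {
%      PixelWand
%        **destination_pixels = GetPixelViewPixels(destination),
%        **source_pixels = GetPixelViewPixels(source);
%
%      ssize_t
%        x;
%
%      for (x=0; x < (ssize_t) GetPixelViewWidth(source); x++)
%      {
%        PixelPacket
%          pixel;
%
%        PixelGetQuantumColor(source_pixels[x],&pixel);
%        PixelSetQuantumColor(destination_pixels[x],&pixel);
%      }
%      return(MagickTrue);
%    }
%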
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
PixelView *source,PixelView *duplex,PixelView *destination,
DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"
ExceptionInfo
*exception;
Image
*destination_image,
*duplex_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (DuplexTransferPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
duplex_image=duplex->wand->images;
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict duplex_indexes,
*magick_restrict indexes;
register const PixelPacket
*magick_restrict duplex_pixels,
*magick_restrict pixels;
register IndexPacket
*magick_restrict destination_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
duplex->region.width,1,duplex->exception);
if (duplex_pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
for (x=0; x < (ssize_t) duplex->region.width; x++)
PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
if (duplex_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) duplex->region.width; x++)
PixelSetBlackQuantum(duplex->pixel_wands[id][x],
GetPixelIndex(duplex_indexes+x));
if (duplex_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) duplex->region.width; x++)
PixelSetIndex(duplex->pixel_wands[id][x],
GetPixelIndex(duplex_indexes+x));
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->region.x,y,destination->region.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelIndex(destination_indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(destination_indexes+x));
if (transfer(source,duplex,destination,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
progress,source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
% char *GetPixelViewException(const PixelView *pixel_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
ExceptionType *severity)
{
char
*description;
assert(pixel_view != (const PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
if (pixel_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
assert(severity != (ExceptionType *) NULL);
*severity=pixel_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
*description='\0';
if (pixel_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->reason),
MaxTextExtent);
if (pixel_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas-- that is, you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
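%  For example, a get callback that sums the red channel of each scanline
%  (a sketch: it assumes the GetPixelViewMethod signature matches the call
%  made by this iterator and that context points to a caller-owned double),
%  guarding the shared accumulator with the critical section noted above:
%
%    MagickBooleanType SumRed(const PixelView *source,void *context)
%    {
%      double
%        sum = 0.0;
%
%      PixelWand
%        **pixels = GetPixelViewPixels(source);
%
%      ssize_t
%        x;
%
%      for (x=0; x < (ssize_t) GetPixelViewWidth(source); x++)
%        sum+=PixelGetRed(pixels[x]);
%      #pragma omp critical (sum_red)
%      *((double *) context)+=sum;
%      return(MagickTrue);
%    }
%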
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (get == (GetPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*indexes;
register const PixelPacket
*pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (get(source,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,GetPixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel wands of the pixel view for the
% calling thread.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
const int
id = GetOpenMPThreadId();
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPixelView() returns MagickTrue if the parameter is verified as a pixel
% view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
size_t
length;
if (pixel_view == (const PixelView *) NULL)
return(MagickFalse);
if (pixel_view->signature != WandSignature)
return(MagickFalse);
length=strlen(PixelViewId);
if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. The pathname may be
% a number preceded by #, to work on a numbered path; e.g., "#1" selects the
% first path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
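% For example, to have later operations take effect inside the first numbered
% path (a minimal sketch):
%
%   MagickBooleanType
%     status;
%
%   status=MagickClipPathImage(wand,"#1",MagickTrue);
%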
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
const char *pathname,const MagickBooleanType inside)
{
return(MagickClipImagePath(wand,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
return(DrawGetFillOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
return(DrawGetStrokeOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
% The format of the DrawPeekGraphicWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
return(PeekDrawingWand(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
% void DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
(void) PopDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
% void DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
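% For example, a balanced push/pop pair that keeps a fill change local (a
% minimal sketch; the rectangle coordinates are arbitrary):
%
%   DrawPushGraphicContext(wand);
%   DrawSetFillAlpha(wand,0.5);
%   DrawRectangle(wand,10.0,10.0,100.0,100.0);
%   DrawPopGraphicContext(wand);
%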
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
(void) PushDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
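% For example, to floodfill from the upper-left corner with a fuzz of 10 (a
% minimal sketch; fill is a previously initialized PixelWand):
%
%   status=MagickColorFloodfillImage(wand,fill,10.0,(PixelWand *) NULL,0,0);
%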
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
const ssize_t x,const ssize_t y)
{
DrawInfo
*draw_info;
MagickBooleanType
status;
PixelPacket
target;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
PixelGetQuantumColor(fill,&draw_info->fill);
(void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
y % wand->images->rows,&target,wand->exception);
if (bordercolor != (PixelWand *) NULL)
PixelGetQuantumColor(bordercolor,&target);
wand->images->fuzz=fuzz;
status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
if (status == MagickFalse)
InheritException(wand->exception,&wand->images->exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by returning a text string of its
% attributes. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
% char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
return(MagickIdentifyImage(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickFlattenImages() merges a sequence of images. This is useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
Image
*flatten_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
flatten_image=FlattenImages(wand->images,wand->exception);
if (flatten_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,flatten_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
return(MagickGetImageProperty(wand,property));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
return(MagickGetIteratorIndex(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
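% For example, to get the extrema of the red channel (a minimal sketch):
%
%   size_t
%     maxima,
%     minima;
%
%   status=MagickGetImageChannelExtrema(wand,RedChannel,&minima,&maxima);
%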
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
const ChannelType channel,size_t *minima,size_t *maxima)
{
MagickBooleanType
status;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
wand->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
size_t *minima,size_t *maxima)
{
MagickBooleanType
status;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel,
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
% MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
return(wand->images->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
const ssize_t x,const ssize_t y,const size_t columns,
const size_t rows,const char *map,const StorageType storage,
void *pixels)
{
return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
% MagickSizeType MagickGetImageSize(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
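% For example (a minimal sketch):
%
%   MagickSizeType
%     length;
%
%   length=MagickGetImageSize(wand);
%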
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
return(GetBlobSize(wand->images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map_wand: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
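% For example, to map an image to the colors of a reference image with
% dithering enabled (a minimal sketch; map_wand is a second wand holding the
% reference image):
%
%   status=MagickMapImage(wand,map_wand,MagickTrue);
%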
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
const MagickWand *map_wand,const MagickBooleanType dither)
{
MagickBooleanType
status;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
ThrowWandException(WandError,"ContainsNoImages",wand->name);
status=MapImage(wand->images,map_wand->images,dither);
if (status == MagickFalse)
InheritException(wand->exception,&wand->images->exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
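% For example, to make the region connected to the upper-left corner fully
% transparent with a fuzz of 10 (a minimal sketch):
%
%   status=MagickMatteFloodfillImage(wand,0.0,10.0,(PixelWand *) NULL,0,0);
%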
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
const double alpha,const double fuzz,const PixelWand *bordercolor,
const ssize_t x,const ssize_t y)
{
DrawInfo
*draw_info;
MagickBooleanType
status;
PixelPacket
target;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
(void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
y % wand->images->rows,&target,wand->exception);
if (bordercolor != (PixelWand *) NULL)
PixelGetQuantumColor(bordercolor,&target);
wand->images->fuzz=fuzz;
status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
(MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
(PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
if (status == MagickFalse)
InheritException(wand->exception,&wand->images->exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
const double radius)
{
Image
*median_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
median_image=MedianFilterImage(wand->images,radius,wand->exception);
if (median_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,median_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
Image
*minimum_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
wand->exception);
if (minimum_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,minimum_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominant color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
const double radius)
{
Image
*mode_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
mode_image=ModeImage(wand->images,radius,wand->exception);
if (mode_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,mode_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
Image
*mosaic_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
mosaic_image=MosaicImages(wand->images,wand->exception);
if (mosaic_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,mosaic_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
const PixelWand *target,const PixelWand *fill,const double fuzz)
{
return(MagickPaintOpaqueImage(wand,target,fill,fuzz));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
const ChannelType channel,const PixelWand *fill,const double fuzz,
const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
MagickBooleanType
status;
status=MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
const PixelWand *target,const PixelWand *fill,const double fuzz)
{
return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
const ChannelType channel,const PixelWand *target,const PixelWand *fill,
const double fuzz)
{
MagickBooleanType
status;
status=MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
const PixelWand *target,const double alpha,const double fuzz)
{
return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRadialBlurImage() radial blurs an image.
%
% The format of the MagickRadialBlurImage method is:
%
% MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
% const double angle)
% MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
% const ChannelType channel,const double angle)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o angle: the angle of the blur in degrees.
%
*/
WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
const double angle)
{
return(MagickRotationalBlurImage(wand,angle));
}
WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
const ChannelType channel,const double angle)
{
return(MagickRotationalBlurImageChannel(wand,channel,angle));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() applies a color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
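% For example, a 5x5 matrix for an RGBA image that halves the red channel and
% leaves the other channels unchanged (a minimal sketch):
%
%   double
%     color_matrix[25] =
%     {
%       0.5, 0.0, 0.0, 0.0, 0.0,
%       0.0, 1.0, 0.0, 0.0, 0.0,
%       0.0, 0.0, 1.0, 0.0, 0.0,
%       0.0, 0.0, 0.0, 1.0, 0.0,
%       0.0, 0.0, 0.0, 0.0, 1.0
%     };
%
%   status=MagickRecolorImage(wand,5,color_matrix);
%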
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
const size_t order,const double *color_matrix)
{
Image
*transform_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (color_matrix == (const double *) NULL)
return(MagickFalse);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
transform_image=RecolorImage(wand->images,order,color_matrix,
wand->exception);
if (transform_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,transform_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
const double radius)
{
Image
*noise_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
ThrowWandException(WandError,"ContainsNoImages",wand->name);
noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
if (noise_image == (Image *) NULL)
return(MagickFalse);
ReplaceImageInList(&wand->images,noise_image);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
Image
*maximum_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
wand->exception);
if (maximum_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,maximum_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
const char *property,const char *value)
{
return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() sets the current image to the position in the list
% specified by the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
const ssize_t index)
{
return(MagickSetIteratorIndex(wand,index));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k S e t I m a g e O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageOption() associates one or more options with a particular
% image format (e.g., MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
const char *format,const char *key,const char *value)
{
char
option[MaxTextExtent];
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
(void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
return(DefineImageOption(wand->image_info,option));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
const PixelWand *target,const double alpha,const double fuzz)
{
return(MagickPaintTransparentImage(wand,target,alpha,fuzz));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
const size_t width,const size_t height,const ssize_t x,
const ssize_t y)
{
return(MagickGetImageRegion(wand,width,height,x,y));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImagePixels() accepts pixel data and stores it in the image at the
% location you specify. The method returns MagickTrue on success otherwise
% MagickFalse if an error is encountered. The pixel data can be either char,
% short int, int, ssize_t, float, or double in the order specified by map.
%
% Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
const ssize_t x,const ssize_t y,const size_t columns,
const size_t rows,const char *map,const StorageType storage,
const void *pixels)
{
return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
return(MagickGetImageBlob(wand,length));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
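% For example (a minimal sketch; GetPixels is the illustrative callback
% sketched under GetPixelViewIterator() above, and the view should be
% released with this API's matching destroy method when you are done):
%
%   double
%     sum = 0.0;
%
%   PixelView
%     *view;
%
%   view=NewPixelView(wand);
%   status=GetPixelViewIterator(view,GetPixels,(void *) &sum);
%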
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
const size_t number_threads)
{
PixelWand
***pixel_wands;
register ssize_t
i;
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
}
return(pixel_wands);
}
WandExport PixelView *NewPixelView(MagickWand *wand)
{
PixelView
*pixel_view;
assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
if (pixel_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) memset(pixel_view,0,sizeof(*pixel_view));
pixel_view->id=AcquireWandId();
(void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) pixel_view->id);
pixel_view->exception=AcquireExceptionInfo();
pixel_view->wand=wand;
pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
pixel_view->exception);
pixel_view->region.width=wand->images->columns;
pixel_view->region.height=wand->images->rows;
pixel_view->number_threads=GetOpenMPMaximumThreads();
pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
pixel_view->number_threads);
if (pixel_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
pixel_view->debug=IsEventLogging();
pixel_view->signature=WandSignature;
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,width,height: These values define the perimeter of the region of
% the pixel view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
const ssize_t y,const size_t width,const size_t height)
{
PixelView
*pixel_view;
assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
if (pixel_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) memset(pixel_view,0,sizeof(*pixel_view));
pixel_view->id=AcquireWandId();
(void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
pixel_view->region.width=width;
pixel_view->region.height=height;
pixel_view->region.x=x;
pixel_view->region.y=y;
pixel_view->number_threads=GetOpenMPMaximumThreads();
pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
pixel_view->number_threads);
if (pixel_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
pixel_view->debug=IsEventLogging();
pixel_view->signature=WandSignature;
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
% PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
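% For example, to visit every row of an iterator (a minimal sketch; iterator
% is assumed to come from NewPixelIterator()):
%
%   PixelWand
%     **row;
%
%   row=PixelGetNextRow(iterator);
%   while (row != (PixelWand **) NULL)
%   {
%     /* inspect or modify the row's pixel wands here */
%     row=PixelGetNextRow(iterator);
%   }
%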
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
size_t
number_wands;
return(PixelGetNextIteratorRow(iterator,&number_wands));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
% char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
ExceptionType *severity)
{
return(PixelGetIteratorException(iterator,severity));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is, no negative offsets or widths or
% heights that exceed the image dimension are permitted. The pixels are
% initially undefined and any settings you make in the callback method are
% automagically synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
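% For example, a set callback that paints every pixel of each scanline gray
% (a minimal sketch; SetGray is illustrative and assumes the
% SetPixelViewMethod callback signature (PixelView *,void *)):
%
%   MagickBooleanType SetGray(PixelView *destination,void *context)
%   {
%     PixelWand
%       **pixels;
%
%     register ssize_t
%       x;
%
%     pixels=GetPixelViewPixels(destination);
%     for (x=0; x < (ssize_t) GetPixelViewWidth(destination); x++)
%       (void) PixelSetColor(pixels[x],"#999999");
%     return(MagickTrue);
%   }
%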
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(destination != (PixelView *) NULL);
assert(destination->signature == WandSignature);
if (set == (SetPixelViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
y,destination->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(destination->view);
if (set(destination,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(destination_image,SetPixelViewTag,progress,
destination->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
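/*
  Illustrative sketch of a SetPixelViewMethod callback for use with
  SetPixelViewIterator(); the accessors GetPixelViewPixels() and
  GetPixelViewWidth() are assumed from the pixel-view API.  The callback
  paints each scanline a constant gray.
*/
#if 0
static MagickBooleanType SetGray(const PixelView *pixel_view,void *context)
{
  register PixelWand
    **pixels;

  register ssize_t
    x;

  (void) context;
  pixels=GetPixelViewPixels(pixel_view);
  for (x=0; x < (ssize_t) GetPixelViewWidth(pixel_view); x++)
    PixelSetColor(pixels[x],"#808080");
  return(MagickTrue);
}
#endif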
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view.  The source
% pixel region is not confined to the image canvas--that is, you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination pixel view is confined to the image canvas--that
% is, no negative offsets or widths or heights that exceed the image
% dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (TransferPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict pixels;
register IndexPacket
*magick_restrict destination_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->region.x,y,destination->region.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (transfer(source,destination,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,TransferPixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view.  The pixel region is
% confined to the image canvas--that is, no negative offsets or widths or
% heights that exceed the image dimension are permitted.  Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdatePixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
source->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (update(source,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) source->region.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
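/*
  Illustrative sketch of an UpdatePixelViewMethod callback for
  UpdatePixelViewIterator(): it halves the red channel of every pixel in the
  scanline.  Accessor names follow the same assumptions as the
  SetPixelViewIterator() sketch above.
*/
#if 0
static MagickBooleanType HalveRed(const PixelView *pixel_view,void *context)
{
  register PixelWand
    **pixels;

  register ssize_t
    x;

  (void) context;
  pixels=GetPixelViewPixels(pixel_view);
  for (x=0; x < (ssize_t) GetPixelViewWidth(pixel_view); x++)
    PixelSetRed(pixels[x],0.5*PixelGetRed(pixels[x]));
  return(MagickTrue);
}
#endif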
#endif
|
dynamic_module_load.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-aarch64-unknown-linux-gnu -ldl && %libomptarget-run-aarch64-unknown-linux-gnu %t.so 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64le-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64le-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-x86_64-pc-linux-gnu -ldl && %libomptarget-run-x86_64-pc-linux-gnu %t.so 2>&1 | %fcheck-x86_64-pc-linux-gnu
#ifdef SHARED
#include <stdio.h>
int foo() {
#pragma omp target
;
printf("%s\n", "DONE.");
return 0;
}
#else
#include <dlfcn.h>
#include <stdio.h>
int main(int argc, char **argv) {
void *Handle = dlopen(argv[1], RTLD_NOW);
int (*Foo)(void);
if (Handle == NULL) {
printf("dlopen() failed: %s\n", dlerror());
return 1;
}
Foo = (int (*)(void)) dlsym(Handle, "foo");
if (Foo == NULL) {
printf("dlsym() failed: %s\n", dlerror());
return 1;
}
// CHECK: DONE.
// CHECK-NOT: {{abort|fault}}
return Foo();
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
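/* Typical use of timeval_subtract() (illustrative sketch): bracket a region
 * with gettimeofday() and convert the difference to seconds.
 */
#if 0
double elapsed_seconds_example(void)
{
struct timeval start, end, diff;
gettimeofday(&start, 0);
/* ... code to be timed ... */
gettimeofday(&end, 0);
(void) timeval_subtract(&diff, &end, &start);
return diff.tv_sec + diff.tv_usec * 1.0e-6;
}
#endif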
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
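/* Alternative layout (illustrative sketch, not used by this benchmark): one
 * contiguous allocation per grid with an index macro avoids the
 * pointer-chasing of the nested malloc scheme above and is usually
 * friendlier to the hardware prefetcher.
 */
#if 0
#define IDX(i,j,k) ((((long)(i))*Ny + (j))*Nx + (k))
double *A0_flat = (double *) malloc(sizeof(double)*(long)Nz*Ny*Nx);
/* A0_flat[IDX(i,j,k)] stands in for A[0][i][j][k] */
#endif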
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
GB_binop__bor_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint16)
// A*D function (colscale): GB (_AxD__bor_uint16)
// D*A function (rowscale): GB (_DxB__bor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint16)
// C=scalar+B GB (_bind1st__bor_uint16)
// C=scalar+B' GB (_bind1st_tran__bor_uint16)
// C=A+scalar GB (_bind2nd__bor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bor_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT16 || GxB_NO_BOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bor_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bor_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
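//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): at the user level this kernel
// backs GrB_apply with the operator bound first, i.e. C = x | B elementwise
// over the pattern of B.  Assumes the typed v1.3 apply variant is available.
//------------------------------------------------------------------------------
#if 0
GrB_Info info = GrB_Matrix_apply_BinaryOp1st_UINT16
    (C, NULL, NULL, GrB_BOR_UINT16, x, B, NULL) ;
#endif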
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bor_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB (_bind1st_tran__bor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__isfinite_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fc64)
// op(A') function: GB (_unop_tran__isfinite_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisfinite (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisfinite (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = GB_cisfinite (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isfinite_bool_fc64)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = GB_cisfinite (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = GB_cisfinite (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
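//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): this kernel backs GrB_apply with
// the GxB_ISFINITE_FC64 unary operator, producing a boolean matrix from a
// double-complex one.
//------------------------------------------------------------------------------
#if 0
GrB_Info info = GrB_Matrix_apply (C, NULL, NULL, GxB_ISFINITE_FC64, A, NULL) ;
#endif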
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__isfinite_bool_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convoaa.c | /* Copyright 2015. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012 Martin Uecker uecker@eecs.berkeley.edu
*/
#include <assert.h>
#include "misc/misc.h"
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/conv.h"
#include "num/vecops.h"
#include "convoaa.h"
void overlapandadd(int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N], complex float* src2)
{
long ndims[2 * N];
long L[2 * N];
long ndim2[2 * N];
long ndim3[2 * N];
for (int i = 0; i < N; i++) {
assert(0 == dims[i] % blk[i]);
assert(dim2[i] <= blk[i]);
ndims[i * 2 + 1] = dims[i] / blk[i];
ndims[i * 2 + 0] = blk[i];
L[i * 2 + 1] = dims[i] / blk[i];
L[i * 2 + 0] = blk[i] + dim2[i] - 1;
ndim2[i * 2 + 1] = 1;
ndim2[i * 2 + 0] = dim2[i];
ndim3[i * 2 + 1] = dims[i] / blk[i] + 1;
ndim3[i * 2 + 0] = blk[i];
}
complex float* tmp = md_alloc(2 * N, L, CFL_SIZE);
// conv_causal_extend(2 * N, L, tmp, ndims, src1, ndim2, src2);
conv(2 * N, ~0, CONV_EXTENDED, CONV_CAUSAL, L, tmp, ndims, src1, ndim2, src2);
// [------++++||||||||
//long str1[2 * N];
long str2[2 * N];
long str3[2 * N];
//md_calc_strides(2 * N, str1, ndims, 8);
md_calc_strides(2 * N, str2, L, 8);
md_calc_strides(2 * N, str3, ndim3, 8);
md_clear(2 * N, ndim3, dst, CFL_SIZE);
md_zadd2(2 * N, L, str3, dst, str3, dst, str2, tmp);
md_free(tmp);
}
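/*
 * The md_* calls above implement overlap-and-add generically over blocked
 * dimensions.  For orientation, here is a self-contained 1-D sketch of the
 * same idea (naive per-block convolution, then accumulation of the
 * overlapping tails) -- illustrative only, not part of the BART API.
 */
#if 0
static void overlapandadd_1d(int Ls, int B, const float* src, int Lk, const float* krn, float* dst)
{
	// dst must hold Ls + Lk - 1 samples, zero-initialized; Ls % B == 0
	for (int b = 0; b < Ls; b += B)
		for (int i = 0; i < B; i++)
			for (int j = 0; j < Lk; j++)
				dst[b + i + j] += src[b + i] * krn[j];
}
#endif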
void overlapandsave(int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N], complex float* src2)
{
// [------++++
// [------
long ndims[2 * N];
long L[2 * N];
long ndim2[2 * N];
long ndim3[2 * N];
for (int i = 0; i < N; i++) {
assert(0 == dims[i] % blk[i]);
assert(dim2[i] <= blk[i]);
ndims[i * 2 + 1] = dims[i] / blk[i];
ndims[i * 2 + 0] = blk[i];
L[i * 2 + 1] = dims[i] / blk[i];
L[i * 2 + 0] = blk[i] + dim2[i] - 1;
ndim2[i * 2 + 1] = 1;
ndim2[i * 2 + 0] = dim2[i];
ndim3[i * 2 + 1] = dims[i] / blk[i] - 0;
ndim3[i * 2 + 0] = blk[i];
}
complex float* tmp = md_alloc(2 * N, L, CFL_SIZE);
long str1[2 * N];
long str2[2 * N];
long str3[2 * N];
md_calc_strides(2 * N, str1, ndims, 8);
md_calc_strides(2 * N, str2, L, 8);
md_calc_strides(2 * N, str3, ndim3, 8);
md_clear(2 * N, L, tmp, 8);
md_copy2(2 * N, ndim3, str2, tmp, str1, src1, 8);
conv(2 * N, ~0, CONV_VALID, CONV_CAUSAL, ndims, dst, L, tmp, ndim2, src2);
md_free(tmp);
}
#if 0
struct conv_plan* overlapandsave_plan(int N, const long dims[N], const long blk[N], const long dim2[N], complex float* src2)
{
return conv_plan(2 * N, ~0, CONV_VALID, CONV_CAUSAL, ndims, L, ndim2, src2);
}
void overlapandsave_exec(struct conv_plan* plan, int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N])
{
md_clear(2 * N, L, tmp, 8);
md_copy2(2 * N, ndim3, str2, tmp, str1, src1, 8);
conv_exec(plan, dst, tmp);
xfree(tmp);
}
#endif
void overlapandsave2(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], const complex float* src1, const long dims2[N], const complex float* src2)
{
long dims1B[N];
long tdims[2 * N];
long nodims[2 * N];
long ndims1[2 * N];
long ndims2[2 * N];
long shift[2 * N];
unsigned int nflags = 0;
for (int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
nflags = MD_SET(nflags, 2 * i);
assert(1 == dims2[i] % 2);
assert(0 == blk[i] % 2);
assert(0 == dims1[i] % 2);
assert(0 == odims[i] % blk[i]);
assert(0 == dims1[i] % blk[i]);
assert(dims1[i] == odims[i]);
assert(dims2[i] <= blk[i]);
assert(dims1[i] >= dims2[i]);
// blocked output
nodims[i * 2 + 1] = odims[i] / blk[i];
nodims[i * 2 + 0] = blk[i];
// expanded temporary storage
tdims[i * 2 + 1] = dims1[i] / blk[i];
tdims[i * 2 + 0] = blk[i] + dims2[i] - 1;
// blocked input
// ---|---,---,---|---
// + +++ +
// + +++ +
// resized input
dims1B[i] = dims1[i] + 2 * blk[i];
ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks?
ndims1[i * 2 + 0] = blk[i];
shift[i * 2 + 1] = 0;
shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2;
// kernel
ndims2[i * 2 + 1] = 1;
ndims2[i * 2 + 0] = dims2[i];
} else {
nodims[i * 2 + 1] = 1;
nodims[i * 2 + 0] = odims[i];
tdims[i * 2 + 1] = 1;
tdims[i * 2 + 0] = dims1[i];
ndims1[i * 2 + 1] = 1;
ndims1[i * 2 + 0] = dims1[i];
shift[i * 2 + 1] = 0;
shift[i * 2 + 0] = 0;
dims1B[i] = dims1[i];
ndims2[i * 2 + 1] = 1;
ndims2[i * 2 + 0] = dims2[i];
}
}
complex float* src1B = md_alloc(N, dims1B, CFL_SIZE);
md_resize_center(N, dims1B, src1B, dims1, src1, CFL_SIZE);
complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE);
long str1[2 * N];
long str2[2 * N];
md_calc_strides(2 * N, str1, ndims1, CFL_SIZE);
md_calc_strides(2 * N, str2, tdims, CFL_SIZE);
long off = md_calc_offset(2 * N, str1, shift);
md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, CFL_SIZE);
md_free(src1B);
conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, dst, tdims, tmp, ndims2, src2);
md_free(tmp);
}
void overlapandsave2H(int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2)
{
long dims1B[N];
long tdims[2 * N];
long nodims[2 * N];
long ndims1[2 * N];
long ndims2[2 * N];
long shift[2 * N];
unsigned int nflags = 0;
for (int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
nflags = MD_SET(nflags, 2 * i);
assert(1 == dims2[i] % 2);
assert(0 == blk[i] % 2);
assert(0 == dims1[i] % 2);
assert(0 == odims[i] % blk[i]);
assert(0 == dims1[i] % blk[i]);
assert(dims1[i] == odims[i]);
assert(dims2[i] <= blk[i]);
assert(dims1[i] >= dims2[i]);
// blocked output
nodims[i * 2 + 1] = odims[i] / blk[i];
nodims[i * 2 + 0] = blk[i];
// expanded temporary storage
tdims[i * 2 + 1] = dims1[i] / blk[i];
tdims[i * 2 + 0] = blk[i] + dims2[i] - 1;
// blocked input
// ---|---,---,---|---
// + +++ +
// + +++ +
// resized input
dims1B[i] = dims1[i] + 2 * blk[i];
ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks?
ndims1[i * 2 + 0] = blk[i];
shift[i * 2 + 1] = 0;
shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2;
// kernel
ndims2[i * 2 + 1] = 1;
ndims2[i * 2 + 0] = dims2[i];
} else {
nodims[i * 2 + 1] = 1;
nodims[i * 2 + 0] = odims[i];
tdims[i * 2 + 1] = 1;
tdims[i * 2 + 0] = dims1[i];
ndims1[i * 2 + 1] = 1;
ndims1[i * 2 + 0] = dims1[i];
shift[i * 2 + 1] = 0;
shift[i * 2 + 0] = 0;
dims1B[i] = dims1[i];
ndims2[i * 2 + 1] = 1;
ndims2[i * 2 + 0] = dims2[i];
}
}
complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE);
//conv(2 * N, flags, CONV_VALID, CONV_SYMMETRIC, nodims, dst, tdims, tmp, ndims2, src2);
convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, src1, ndims2, src2);
complex float* src1B = md_alloc(N, dims1B, CFL_SIZE);
long str1[2 * N];
long str2[2 * N];
md_calc_strides(2 * N, str1, ndims1, CFL_SIZE);
md_calc_strides(2 * N, str2, tdims, CFL_SIZE);
long off = md_calc_offset(2 * N, str1, shift);
md_clear(N, dims1B, src1B, CFL_SIZE);
//md_copy2(2 * N, tdims, str1, ((void*)src1B) + off, str2, tmp, sizeof(complex float));// FIXME:
md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp);
md_resize_center(N, dims1, dst, dims1B, src1B, CFL_SIZE);
md_free(src1B);
md_free(tmp);
}
void overlapandsave2NE(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], complex float* src1, const long dims2[N], complex float* src2, const long mdims[N], complex float* msk)
{
long dims1B[N];
long tdims[2 * N];
long nodims[2 * N];
long ndims1[2 * N];
long ndims2[2 * N];
long shift[2 * N];
unsigned int nflags = 0;
for (int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
nflags = MD_SET(nflags, 2 * i);
assert(1 == dims2[i] % 2);
assert(0 == blk[i] % 2);
assert(0 == dims1[i] % 2);
assert(0 == odims[i] % blk[i]);
assert(0 == dims1[i] % blk[i]);
assert(dims1[i] == odims[i]);
assert(dims2[i] <= blk[i]);
assert(dims1[i] >= dims2[i]);
// blocked output
nodims[i * 2 + 1] = odims[i] / blk[i];
nodims[i * 2 + 0] = blk[i];
// expanded temporary storage
tdims[i * 2 + 1] = dims1[i] / blk[i];
tdims[i * 2 + 0] = blk[i] + dims2[i] - 1;
// blocked input
// ---|---,---,---|---
// + +++ +
// + +++ +
// resized input
dims1B[i] = dims1[i] + 2 * blk[i];
ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks?
ndims1[i * 2 + 0] = blk[i];
shift[i * 2 + 1] = 0;
shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2;
// kernel
ndims2[i * 2 + 1] = 1;
ndims2[i * 2 + 0] = dims2[i];
} else {
nodims[i * 2 + 1] = 1;
nodims[i * 2 + 0] = odims[i];
tdims[i * 2 + 1] = 1;
tdims[i * 2 + 0] = dims1[i];
ndims1[i * 2 + 1] = 1;
ndims1[i * 2 + 0] = dims1[i];
shift[i * 2 + 1] = 0;
shift[i * 2 + 0] = 0;
dims1B[i] = dims1[i];
ndims2[i * 2 + 1] = 1;
ndims2[i * 2 + 0] = dims2[i];
}
}
complex float* src1B = md_alloc(N, dims1B, CFL_SIZE);
complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE);
complex float* tmpX = md_alloc(N, odims, CFL_SIZE);
long str1[2 * N];
long str2[2 * N];
md_calc_strides(2 * N, str1, ndims1, sizeof(complex float));
md_calc_strides(2 * N, str2, tdims, sizeof(complex float));
long off = md_calc_offset(2 * N, str1, shift);
md_resize_center(N, dims1B, src1B, dims1, src1, sizeof(complex float));
// we can loop here
md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, sizeof(complex float));
conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2);
long ostr[N];
long mstr[N];
md_calc_strides(N, ostr, odims, sizeof(complex float));
md_calc_strides(N, mstr, mdims, sizeof(complex float));
md_zmul2(N, odims, ostr, tmpX, ostr, tmpX, mstr, msk);
convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2);
md_clear(N, dims1B, src1B, sizeof(complex float));
md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp);
//
md_resize_center(N, dims1, dst, dims1B, src1B, sizeof(complex float));
md_free(src1B);
md_free(tmpX);
md_free(tmp);
}
void overlapandsave2NEB(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk)
{
long dims1B[N];
long tdims[2 * N];
long nodims[2 * N];
long ndims2[2 * N];
long nmdims[2 * N];
int e = N;
for (int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
assert(1 == dims2[i] % 2);
assert(0 == blk[i] % 2);
assert(0 == dims1[i] % 2);
assert(0 == odims[i] % blk[i]);
assert(0 == dims1[i] % blk[i]);
assert(dims1[i] == odims[i]);
assert(dims2[i] <= blk[i]);
assert(dims1[i] >= dims2[i]);
assert((1 == mdims[i]) || (mdims[i] == dims1[i]));
// blocked output
nodims[e] = odims[i] / blk[i];
nodims[i] = blk[i];
// expanded temporary storage
tdims[e] = dims1[i] / blk[i];
tdims[i] = blk[i] + dims2[i] - 1;
// blocked input
// ---|---,---,---|---
// + +++ +
// + +++ +
if (1 == mdims[i]) {
nmdims[2 * i + 1] = 1;
nmdims[2 * i + 0] = 1;
} else {
nmdims[2 * i + 1] = mdims[i] / blk[i];
nmdims[2 * i + 0] = blk[i];
}
// resized input
// minimal padding
dims1B[i] = dims1[i] + (dims2[i] - 1);
// kernel
ndims2[e] = 1;
ndims2[i] = dims2[i];
e++;
} else {
nodims[i] = odims[i];
tdims[i] = dims1[i];
nmdims[2 * i + 1] = 1;
nmdims[2 * i + 0] = mdims[i];
dims1B[i] = dims1[i];
ndims2[i] = dims2[i];
}
}
int NE = e;
//long S = md_calc_size(N, dims1B, 1);
long str1[NE];
long str1B[N];
md_calc_strides(N, str1B, dims1B, sizeof(complex float));
e = N;
for (int i = 0; i < N; i++) {
str1[i] = str1B[i];
if (MD_IS_SET(flags, i))
str1[e++] = str1B[i] * blk[i];
}
assert(NE == e);
long str2[NE];
md_calc_strides(NE, str2, tdims, sizeof(complex float));
long ostr[NE];
long mstr[NE];
long mstrB[2 * N];
md_calc_strides(NE, ostr, nodims, sizeof(complex float));
md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float));
e = N;
for (int i = 0; i < N; i++) {
mstr[i] = mstrB[2 * i + 0];
if (MD_IS_SET(flags, i))
mstr[e++] = mstrB[2 * i + 1];
}
assert(NE == e);
const complex float* src1B = src1;//!
//complex float* src1B = xmalloc(S * sizeof(complex float));
//md_resizec(N, dims1B, src1B, dims1, src1, sizeof(complex float));
// we can loop here
assert(NE == N + 3);
assert(1 == ndims2[N + 0]);
assert(1 == ndims2[N + 1]);
assert(1 == ndims2[N + 2]);
assert(tdims[N + 0] == nodims[N + 0]);
assert(tdims[N + 1] == nodims[N + 1]);
assert(tdims[N + 2] == nodims[N + 2]);
//complex float* src1C = xmalloc(S * sizeof(complex float));
complex float* src1C = dst;
md_clear(N, dims1B, src1C, sizeof(complex float)); // must be done here
#pragma omp parallel for collapse(3)
for (int k = 0; k < nodims[N + 2]; k++) {
for (int j = 0; j < nodims[N + 1]; j++) {
for (int i = 0; i < nodims[N + 0]; i++) {
complex float* tmp = md_alloc_sameplace(N, tdims, CFL_SIZE, dst);
complex float* tmpX = md_alloc_sameplace(N, nodims, CFL_SIZE, dst);
long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k;
long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k;
md_copy2(N, tdims, str2, tmp, str1, ((const void*)src1B) + off1, sizeof(complex float));
conv(N, flags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2);
md_zmul2(N, nodims, ostr, tmpX, ostr, tmpX, mstr, ((const void*)msk) + off2);
convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2);
#pragma omp critical
md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2, tmp);
md_free(tmpX);
md_free(tmp);
}}}
//md_resizec(N, dims1, dst, dims1B, src1C, sizeof(complex float));
//xfree(src1C);
//xfree(src1B);
}
void overlapandsave2HB(int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk)
{
long dims1B[N];
long tdims[2 * N];
long nodims[2 * N];
long ndims2[2 * N];
long nmdims[2 * N];
int e = N;
for (int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
assert(1 == dims2[i] % 2);
assert(0 == blk[i] % 2);
assert(0 == dims1[i] % 2);
assert(0 == odims[i] % blk[i]);
assert(0 == dims1[i] % blk[i]);
assert(dims1[i] == odims[i]);
assert(dims2[i] <= blk[i]);
assert(dims1[i] >= dims2[i]);
assert((1 == mdims[i]) || (mdims[i] == dims1[i]));
// blocked output
nodims[e] = odims[i] / blk[i];
nodims[i] = blk[i];
// expanded temporary storage
tdims[e] = dims1[i] / blk[i];
tdims[i] = blk[i] + dims2[i] - 1;
// blocked input
// ---|---,---,---|---
// + +++ +
// + +++ +
if (1 == mdims[i]) {
nmdims[2 * i + 1] = 1;
nmdims[2 * i + 0] = 1;
} else {
nmdims[2 * i + 1] = mdims[i] / blk[i];
nmdims[2 * i + 0] = blk[i];
}
// resized input
// minimal padding
dims1B[i] = dims1[i] + (dims2[i] - 1);
// kernel
ndims2[e] = 1;
ndims2[i] = dims2[i];
e++;
} else {
nodims[i] = odims[i];
tdims[i] = dims1[i];
nmdims[2 * i + 1] = 1;
nmdims[2 * i + 0] = mdims[i];
dims1B[i] = dims1[i];
ndims2[i] = dims2[i];
}
}
int NE = e;
// long S = md_calc_size(N, dims1B, 1);
long str1[NE];
long str1B[N];
md_calc_strides(N, str1B, dims1B, sizeof(complex float));
e = N;
for (int i = 0; i < N; i++) {
str1[i] = str1B[i];
if (MD_IS_SET(flags, i))
str1[e++] = str1B[i] * blk[i];
}
assert(NE == e);
long str2[NE];
md_calc_strides(NE, str2, tdims, sizeof(complex float));
long ostr[NE];
long mstr[NE];
long mstrB[2 * N];
md_calc_strides(NE, ostr, nodims, sizeof(complex float));
md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float));
e = N;
for (int i = 0; i < N; i++) {
mstr[i] = mstrB[2 * i + 0];
if (MD_IS_SET(flags, i))
mstr[e++] = mstrB[2 * i + 1];
}
assert(NE == e);
// we can loop here
assert(NE == N + 3);
assert(1 == ndims2[N + 0]);
assert(1 == ndims2[N + 1]);
assert(1 == ndims2[N + 2]);
assert(tdims[N + 0] == nodims[N + 0]);
assert(tdims[N + 1] == nodims[N + 1]);
assert(tdims[N + 2] == nodims[N + 2]);
//complex float* src1C = xmalloc(S * sizeof(complex float));
complex float* src1C = dst;
md_clear(N, dims1B, src1C, CFL_SIZE); // must be done here
#pragma omp parallel for collapse(3)
for (int k = 0; k < nodims[N + 2]; k++) {
for (int j = 0; j < nodims[N + 1]; j++) {
for (int i = 0; i < nodims[N + 0]; i++) {
complex float* tmp = md_alloc_sameplace(N, tdims, CFL_SIZE, dst);
complex float* tmpX = md_alloc_sameplace(N, nodims, CFL_SIZE, dst);
long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k;
long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k;
long off3 = ostr[N + 0] * i + ostr[N + 1] * j + ostr[N + 2] * k;
md_zmul2(N, nodims, ostr, tmpX, ostr, ((const void*)src1) + off3, mstr, ((const void*)msk) + off2);
convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2);
#pragma omp critical
md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2, tmp);
md_free(tmpX);
md_free(tmp);
}}}
}
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/fourier.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
PixelChannel
channel;
MagickBooleanType
modulus;
size_t
width,
height;
ssize_t
center;
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,const ComplexOperator op,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
CacheView
*Ai_view,
*Ar_view,
*Bi_view,
*Br_view,
*Ci_view,
*Cr_view;
const char
*artifact;
const Image
*Ai_image,
*Ar_image,
*Bi_image,
*Br_image;
double
snr;
Image
*Ci_image,
*complex_images,
*Cr_image,
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
number_channels;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (images->next == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",images->filename);
return((Image *) NULL);
}
image=CloneImage(images,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImageList(image);
return(image);
}
image->depth=32UL;
complex_images=NewImageList();
AppendImageToList(&complex_images,image);
image=CloneImage(images,0,0,MagickTrue,exception);
if (image == (Image *) NULL)
{
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
AppendImageToList(&complex_images,image);
/*
Apply complex mathematics to image pixels.
*/
artifact=GetImageArtifact(image,"complex:snr");
snr=0.0;
if (artifact != (const char *) NULL)
snr=StringToDouble(artifact,(char **) NULL);
Ar_image=images;
Ai_image=images->next;
Br_image=images;
Bi_image=images->next;
if ((images->next->next != (Image *) NULL) &&
(images->next->next->next != (Image *) NULL))
{
Br_image=images->next->next;
Bi_image=images->next->next->next;
}
Cr_image=complex_images;
Ci_image=complex_images->next;
number_channels=MagickMin(MagickMin(MagickMin(
Ar_image->number_channels,Ai_image->number_channels),MagickMin(
Br_image->number_channels,Bi_image->number_channels)),MagickMin(
Cr_image->number_channels,Ci_image->number_channels));
Ar_view=AcquireVirtualCacheView(Ar_image,exception);
Ai_view=AcquireVirtualCacheView(Ai_image,exception);
Br_view=AcquireVirtualCacheView(Br_image,exception);
Bi_view=AcquireVirtualCacheView(Bi_image,exception);
Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(Cr_image,complex_images,Cr_image->rows,1L)
#endif
for (y=0; y < (ssize_t) Cr_image->rows; y++)
{
register const Quantum
*magick_restrict Ai,
*magick_restrict Ar,
*magick_restrict Bi,
*magick_restrict Br;
register Quantum
*magick_restrict Ci,
*magick_restrict Cr;
register ssize_t
x;
if (status == MagickFalse)
continue;
Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Cr_image->columns,1,exception);
Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Cr_image->columns,1,exception);
Br=GetCacheViewVirtualPixels(Br_view,0,y,Cr_image->columns,1,exception);
Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Cr_image->columns,1,exception);
Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) ||
(Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) ||
(Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) Cr_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_channels; i++)
{
switch (op)
{
case AddComplexOperator:
{
Cr[i]=Ar[i]+Br[i];
Ci[i]=Ai[i]+Bi[i];
break;
}
case ConjugateComplexOperator:
default:
{
Cr[i]=Ar[i];
Ci[i]=(-Bi[i]);
break;
}
case DivideComplexOperator:
{
double
gamma;
gamma=PerceptibleReciprocal((double) Br[i]*Br[i]+Bi[i]*Bi[i]+snr);
Cr[i]=gamma*((double) Ar[i]*Br[i]+(double) Ai[i]*Bi[i]);
Ci[i]=gamma*((double) Ai[i]*Br[i]-(double) Ar[i]*Bi[i]);
break;
}
case MagnitudePhaseComplexOperator:
{
Cr[i]=sqrt((double) Ar[i]*Ar[i]+(double) Ai[i]*Ai[i]);
Ci[i]=atan2((double) Ai[i],(double) Ar[i])/(2.0*MagickPI)+0.5;
break;
}
case MultiplyComplexOperator:
{
Cr[i]=QuantumScale*((double) Ar[i]*Br[i]-(double) Ai[i]*Bi[i]);
Ci[i]=QuantumScale*((double) Ai[i]*Br[i]+(double) Ar[i]*Bi[i]);
break;
}
case RealImaginaryComplexOperator:
{
Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5));
Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5));
break;
}
case SubtractComplexOperator:
{
Cr[i]=Ar[i]-Br[i];
Ci[i]=Ai[i]-Bi[i];
break;
}
}
}
Ar+=GetPixelChannels(Ar_image);
Ai+=GetPixelChannels(Ai_image);
Br+=GetPixelChannels(Br_image);
Bi+=GetPixelChannels(Bi_image);
Cr+=GetPixelChannels(Cr_image);
Ci+=GetPixelChannels(Ci_image);
}
if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
Cr_view=DestroyCacheView(Cr_view);
Ci_view=DestroyCacheView(Ci_view);
Br_view=DestroyCacheView(Br_view);
Bi_view=DestroyCacheView(Bi_view);
Ar_view=DestroyCacheView(Ar_view);
Ai_view=DestroyCacheView(Ai_view);
if (status == MagickFalse)
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o modulus: if true, return the transform as a magnitude / phase pair,
%      otherwise as a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
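%  A minimal usage sketch (assuming `image` was read with ReadImage() and
%  `exception` is a valid ExceptionInfo; the result is an image list that
%  must be destroyed with DestroyImageList()):
%
%    Image
%      *fourier_pair;
%
%    fourier_pair=ForwardFourierTransformImage(image,MagickTrue,exception);
%    if (fourier_pair != (Image *) NULL)
%      fourier_pair=DestroyImageList(fourier_pair);
%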
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType RollFourier(const size_t width,const size_t height,
const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
double
*source_pixels;
MemoryInfo
*source_info;
register ssize_t
i,
x;
ssize_t
u,
v,
y;
/*
Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
*/
source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
return(MagickFalse);
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
i=0L;
for (y=0L; y < (ssize_t) height; y++)
{
if (y_offset < 0L)
v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
else
v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
y+y_offset;
for (x=0L; x < (ssize_t) width; x++)
{
if (x_offset < 0L)
u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
else
u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
x+x_offset;
source_pixels[v*width+u]=roll_pixels[i++];
}
}
(void) memcpy(roll_pixels,source_pixels,height*width*
sizeof(*source_pixels));
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
const size_t height,double *source_pixels,double *forward_pixels)
{
MagickBooleanType
status;
register ssize_t
x;
ssize_t
center,
y;
  /*
    Swap quadrants: unpack the width/2+1 column half-plane produced by
    FFTW's r2c transform into a full-width, center-origin plane.
  */
center=(ssize_t) (width/2L)+1L;
status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
source_pixels);
if (status == MagickFalse)
return(MagickFalse);
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
for (y=1; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[(height-y)*width+width/2L-x-1L]=
source_pixels[y*center+x+1L];
for (x=0L; x < (ssize_t) (width/2L); x++)
forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
return(MagickTrue);
}
static void CorrectPhaseLHS(const size_t width,const size_t height,
double *fourier_pixels)
{
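  /*
    Negate the left half of the plane; this phase sign correction matches
    the quadrant-swapped, center-origin layout.
  */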
register ssize_t
x;
ssize_t
y;
for (y=0L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L); x++)
fourier_pixels[y*width+x]*=(-1.0);
}
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*magnitude_pixels,
*phase_pixels;
Image
*magnitude_image,
*phase_image;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
register Quantum
*q;
register ssize_t
x;
ssize_t
i,
y;
magnitude_image=GetFirstImageInList(image);
phase_image=GetNextImageInList(image);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",image->filename);
return(MagickFalse);
}
/*
Create "Fourier Transform" image from constituent arrays.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
(void) memset(magnitude_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*magnitude_pixels));
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
(void) memset(phase_pixels,0,fourier_info->width*
fourier_info->height*sizeof(*phase_pixels));
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude,magnitude_pixels);
if (status != MagickFalse)
status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
phase_pixels);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]/=(2.0*MagickPI);
phase_pixels[i]+=0.5;
i++;
}
}
magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case GreenPixelChannel:
{
SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case BluePixelChannel:
{
SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case BlackPixelChannel:
{
SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
case AlphaPixelChannel:
{
SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange*
magnitude_pixels[i]),q);
break;
}
}
i++;
q+=GetPixelChannels(magnitude_image);
}
status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
if (status == MagickFalse)
break;
}
magnitude_view=DestroyCacheView(magnitude_view);
i=0L;
phase_view=AcquireAuthenticCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
SetPixelRed(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case GreenPixelChannel:
{
SetPixelGreen(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case BluePixelChannel:
{
SetPixelBlue(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case BlackPixelChannel:
{
SetPixelBlack(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
case AlphaPixelChannel:
{
SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange*
phase_pixels[i]),q);
break;
}
}
i++;
q+=GetPixelChannels(phase_image);
}
status=SyncCacheViewAuthenticPixels(phase_view,exception);
if (status == MagickFalse)
break;
}
phase_view=DestroyCacheView(phase_view);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
const Image *image,double *magnitude_pixels,double *phase_pixels,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_complex
*forward_pixels;
fftw_plan
fftw_r2c_plan;
MemoryInfo
*forward_info,
*source_info;
register const Quantum
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Generate the forward Fourier transform.
*/
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
memset(source_pixels,0,fourier_info->width*fourier_info->height*
sizeof(*source_pixels));
i=0L;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
source_pixels[i]=QuantumScale*GetPixelRed(image,p);
break;
}
case GreenPixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelGreen(image,p);
break;
}
case BluePixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlue(image,p);
break;
}
case BlackPixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelBlack(image,p);
break;
}
case AlphaPixelChannel:
{
source_pixels[i]=QuantumScale*GetPixelAlpha(image,p);
break;
}
}
i++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
forward_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*forward_pixels));
if (forward_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
return(MagickFalse);
}
forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
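  /*
    Serialize FFTW plan creation: the FFTW planner is not thread-safe (only
    fftw_execute() is).
  */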
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
source_pixels,forward_pixels,FFTW_ESTIMATE);
fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels);
fftw_destroy_plan(fftw_r2c_plan);
source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
{
double
gamma;
/*
Normalize fourier transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
forward_pixels[i]*=gamma;
#else
forward_pixels[i][0]*=gamma;
forward_pixels[i][1]*=gamma;
#endif
i++;
}
}
/*
Generate magnitude and phase (or real and imaginary).
*/
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
#else
        magnitude_pixels[i]=sqrt(forward_pixels[i][0]*forward_pixels[i][0]+
          forward_pixels[i][1]*forward_pixels[i][1]);
        phase_pixels[i]=atan2(forward_pixels[i][1],forward_pixels[i][0]);
#endif
        i++;
      }
  else
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
#else
        magnitude_pixels[i]=forward_pixels[i][0];
        phase_pixels[i]=forward_pixels[i][1];
#endif
        i++;
      }
forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
return(MagickTrue);
}
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
const PixelChannel channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
double
*magnitude_pixels,
*phase_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*magnitude_info,
*phase_info;
fourier_info.width=image->columns;
fourier_info.height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows : image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*phase_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL))
{
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
phase_pixels,exception);
if (status != MagickFalse)
status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
phase_pixels,exception);
phase_info=RelinquishVirtualMemory(phase_info);
magnitude_info=RelinquishVirtualMemory(magnitude_info);
return(status);
}
#endif
MagickExport Image *ForwardFourierTransformImage(const Image *image,
const MagickBooleanType modulus,ExceptionInfo *exception)
{
Image
*fourier_image;
fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
image->filename);
#else
{
Image
*magnitude_image;
size_t
height,
width;
width=image->columns;
height=image->rows;
if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
((image->rows % 2) != 0))
{
size_t extent=image->columns < image->rows ? image->rows :
image->columns;
width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
height=width;
magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
if (magnitude_image != (Image *) NULL)
{
Image
*phase_image;
magnitude_image->storage_class=DirectClass;
magnitude_image->depth=32UL;
phase_image=CloneImage(image,width,height,MagickTrue,exception);
if (phase_image == (Image *) NULL)
magnitude_image=DestroyImage(magnitude_image);
else
{
MagickBooleanType
is_gray,
status;
phase_image->storage_class=DirectClass;
phase_image->depth=32UL;
AppendImageToList(&fourier_image,magnitude_image);
AppendImageToList(&fourier_image,phase_image);
status=MagickTrue;
is_gray=IsImageGray(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GrayPixelChannel,modulus,fourier_image,exception);
else
thread_status=ForwardFourierTransformChannel(image,
RedPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
GreenPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=ForwardFourierTransformChannel(image,
BluePixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->colorspace == CMYKColorspace)
thread_status=ForwardFourierTransformChannel(image,
BlackPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (image->alpha_trait != UndefinedPixelTrait)
thread_status=ForwardFourierTransformChannel(image,
AlphaPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImageList(fourier_image);
fftw_cleanup();
}
}
}
#endif
return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
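%  A minimal usage sketch (assuming `magnitude` and `phase` are the image
%  pair produced by ForwardFourierTransformImage() with modulus enabled; the
%  result is a single image destroyed with DestroyImage()):
%
%    Image
%      *inverse;
%
%    inverse=InverseFourierTransformImage(magnitude,phase,MagickTrue,
%      exception);
%    if (inverse != (Image *) NULL)
%      inverse=DestroyImage(inverse);
%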
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
static MagickBooleanType InverseQuadrantSwap(const size_t width,
const size_t height,const double *source,double *destination)
{
register ssize_t
x;
ssize_t
center,
y;
  /*
    Swap quadrants: pack the full-width, center-origin plane back into the
    width/2+1 column half-plane layout that FFTW's c2r transform expects.
  */
center=(ssize_t) (width/2L)+1L;
for (y=1L; y < (ssize_t) height; y++)
for (x=0L; x < (ssize_t) (width/2L+1L); x++)
destination[(height-y)*center-x+width/2L]=source[y*width+x];
for (y=0L; y < (ssize_t) height; y++)
destination[y*center]=source[y*width+width/2L];
for (x=0L; x < center; x++)
destination[x]=source[center-x-1L];
return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
const Image *magnitude_image,const Image *phase_image,
fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
CacheView
*magnitude_view,
*phase_view;
double
*inverse_pixels,
*magnitude_pixels,
*phase_pixels;
MagickBooleanType
status;
MemoryInfo
*inverse_info,
*magnitude_info,
*phase_info;
register const Quantum
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Inverse fourier - read image and break down into a double array.
*/
magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*magnitude_pixels));
phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*phase_pixels));
inverse_info=AcquireVirtualMemory((size_t) fourier_info->width,
(fourier_info->height/2+1)*sizeof(*inverse_pixels));
if ((magnitude_info == (MemoryInfo *) NULL) ||
(phase_info == (MemoryInfo *) NULL) ||
(inverse_info == (MemoryInfo *) NULL))
{
if (magnitude_info != (MemoryInfo *) NULL)
magnitude_info=RelinquishVirtualMemory(magnitude_info);
if (phase_info != (MemoryInfo *) NULL)
phase_info=RelinquishVirtualMemory(phase_info);
if (inverse_info != (MemoryInfo *) NULL)
inverse_info=RelinquishVirtualMemory(inverse_info);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
i=0L;
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p);
break;
}
case GreenPixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p);
break;
}
case BluePixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p);
break;
}
case BlackPixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p);
break;
}
case AlphaPixelChannel:
{
magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p);
break;
}
}
i++;
p+=GetPixelChannels(magnitude_image);
}
}
magnitude_view=DestroyCacheView(magnitude_view);
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
magnitude_pixels,inverse_pixels);
(void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*magnitude_pixels));
i=0L;
phase_view=AcquireVirtualCacheView(phase_image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p);
break;
}
case GreenPixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p);
break;
}
case BluePixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p);
break;
}
case BlackPixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p);
break;
}
case AlphaPixelChannel:
{
phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p);
break;
}
}
i++;
p+=GetPixelChannels(phase_image);
}
}
if (fourier_info->modulus != MagickFalse)
{
i=0L;
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
phase_pixels[i]-=0.5;
phase_pixels[i]*=(2.0*MagickPI);
i++;
}
}
phase_view=DestroyCacheView(phase_view);
CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
if (status != MagickFalse)
status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
phase_pixels,inverse_pixels);
(void) memcpy(phase_pixels,inverse_pixels,fourier_info->height*
fourier_info->center*sizeof(*phase_pixels));
inverse_info=RelinquishVirtualMemory(inverse_info);
/*
Merge two sets.
*/
i=0L;
if (fourier_info->modulus != MagickFalse)
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
magnitude_pixels[i]*sin(phase_pixels[i]);
#else
fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
i++;
}
else
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
fourier_pixels[i][0]=magnitude_pixels[i];
fourier_pixels[i][1]=phase_pixels[i];
#endif
i++;
}
magnitude_info=RelinquishVirtualMemory(magnitude_info);
phase_info=RelinquishVirtualMemory(phase_info);
return(status);
}
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*value;
double
*source_pixels;
fftw_plan
fftw_c2r_plan;
MemoryInfo
*source_info;
register Quantum
*q;
register ssize_t
i,
x;
ssize_t
y;
source_info=AcquireVirtualMemory((size_t) fourier_info->width,
fourier_info->height*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
value=GetImageArtifact(image,"fourier:normalize");
if (LocaleCompare(value,"inverse") == 0)
{
double
gamma;
/*
Normalize inverse transform.
*/
i=0L;
gamma=PerceptibleReciprocal((double) fourier_info->width*
fourier_info->height);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
for (x=0L; x < (ssize_t) fourier_info->center; x++)
{
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
fourier_pixels[i]*=gamma;
#else
fourier_pixels[i][0]*=gamma;
fourier_pixels[i][1]*=gamma;
#endif
i++;
}
}
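  /*
    Serialize FFTW plan creation: the FFTW planner is not thread-safe.
  */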
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InverseFourierTransform)
#endif
fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
fourier_pixels,source_pixels,FFTW_ESTIMATE);
fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels);
fftw_destroy_plan(fftw_c2r_plan);
i=0L;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0L; y < (ssize_t) fourier_info->height; y++)
{
if (y >= (ssize_t) image->rows)
break;
q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
image->columns ? image->columns : fourier_info->width,1UL,exception);
if (q == (Quantum *) NULL)
break;
for (x=0L; x < (ssize_t) fourier_info->width; x++)
{
if (x < (ssize_t) image->columns)
switch (fourier_info->channel)
{
case RedPixelChannel:
default:
{
SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q);
break;
}
case GreenPixelChannel:
{
SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
case BluePixelChannel:
{
SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
case BlackPixelChannel:
{
SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
case AlphaPixelChannel:
{
SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]),
q);
break;
}
}
i++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
static MagickBooleanType InverseFourierTransformChannel(
const Image *magnitude_image,const Image *phase_image,
const PixelChannel channel,const MagickBooleanType modulus,
Image *fourier_image,ExceptionInfo *exception)
{
fftw_complex
*inverse_pixels;
FourierInfo
fourier_info;
MagickBooleanType
status;
MemoryInfo
*inverse_info;
fourier_info.width=magnitude_image->columns;
fourier_info.height=magnitude_image->rows;
if ((magnitude_image->columns != magnitude_image->rows) ||
((magnitude_image->columns % 2) != 0) ||
((magnitude_image->rows % 2) != 0))
{
size_t extent=magnitude_image->columns < magnitude_image->rows ?
magnitude_image->rows : magnitude_image->columns;
fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
}
fourier_info.height=fourier_info.width;
fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L;
fourier_info.channel=channel;
fourier_info.modulus=modulus;
inverse_info=AcquireVirtualMemory((size_t) fourier_info.width,
(fourier_info.height/2+1)*sizeof(*inverse_pixels));
if (inverse_info == (MemoryInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
magnitude_image->filename);
return(MagickFalse);
}
inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info);
status=InverseFourier(&fourier_info,magnitude_image,phase_image,
inverse_pixels,exception);
if (status != MagickFalse)
status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image,
exception);
inverse_info=RelinquishVirtualMemory(inverse_info);
return(status);
}
#endif
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
const Image *phase_image,const MagickBooleanType modulus,
ExceptionInfo *exception)
{
Image
*fourier_image;
assert(magnitude_image != (Image *) NULL);
assert(magnitude_image->signature == MagickCoreSignature);
if (magnitude_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
magnitude_image->filename);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",magnitude_image->filename);
return((Image *) NULL);
}
#if !defined(MAGICKCORE_FFTW_DELEGATE)
fourier_image=(Image *) NULL;
(void) modulus;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
magnitude_image->filename);
#else
{
fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
magnitude_image->rows,MagickTrue,exception);
if (fourier_image != (Image *) NULL)
{
MagickBooleanType
is_gray,
status;
status=MagickTrue;
is_gray=IsImageGray(magnitude_image);
if (is_gray != MagickFalse)
is_gray=IsImageGray(phase_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel sections
#endif
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
if (is_gray != MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GrayPixelChannel,modulus,fourier_image,exception);
else
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,RedPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,GreenPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (is_gray == MagickFalse)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BluePixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->colorspace == CMYKColorspace)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,BlackPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp section
#endif
{
MagickBooleanType
thread_status;
thread_status=MagickTrue;
if (magnitude_image->alpha_trait != UndefinedPixelTrait)
thread_status=InverseFourierTransformChannel(magnitude_image,
phase_image,AlphaPixelChannel,modulus,fourier_image,exception);
if (thread_status == MagickFalse)
status=thread_status;
}
}
if (status == MagickFalse)
fourier_image=DestroyImage(fourier_image);
}
fftw_cleanup();
}
#endif
return(fourier_image);
}
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
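//
// A minimal usage sketch of LoadEXR() (the path "input.exr" is a
// hypothetical example):
//
//   float *rgba = NULL;
//   int width = 0, height = 0;
//   const char *err = NULL;
//   if (LoadEXR(&rgba, &width, &height, "input.exr", &err) !=
//       TINYEXR_SUCCESS) {
//     if (err != NULL) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     // rgba holds width * height RGBA pixels (4 floats per pixel); release
//     // it with free() when done.
//     free(rgba);
//   }
//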
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz to decode ZIP format pixel data. Linking with zlib is
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
// tile format image;
// non-zero only for a single-part "normal" tiled file (according to spec.)
int tiled;
int long_name; // long name attribute
// deep image(EXR 2.0);
// for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
int non_image;
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
  int height;   // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
EXRBox2i data_window;
EXRBox2i display_window;
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
// for a single-part file, agrees with the version field bit 11
// for a multi-part file, it is consistent with the type of part
int non_image;
int multipart;
unsigned int header_len;
  // Custom attributes (excludes required attributes, e.g. `channels`,
  // `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
                              // ParseEXRHeaderFrom(Memory|File), then users
                              // can edit it (only valid for HALF pixel type
// channel)
// name attribute required for multipart files;
  // must be unique and non-empty (according to spec.);
  // use EXRSetNameAttr for setting value;
  // max 255 characters allowed - excluding terminating zero
char name[256];
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
struct _EXRImage* next_level; // NULL if scanline format or image is the last level.
int level_x; // x level index
int level_y; // y level index
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`.
// Result image format is: float x RGBA x width x height.
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba`. Result image format is: float x RGBA
// x width x height. Returns negative value and may set error string in `err`
// when there's an error. When the specified layer name is not found in the
// EXR file, the function returns `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by inspecting the header).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others.
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x
// width x height`.
// Save image as fp16(HALF) format when `save_as_fp16` is a non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
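// A minimal sketch of a SaveEXR() call (assumes `rgba` points at width x
// height RGBA float pixels; "out.exr" is a hypothetical path):
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4 /* RGBA */, 1 /* fp16 */,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err != NULL)
//     FreeEXRErrorMessage(err);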
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
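// A minimal sketch of the low-level loading sequence (single-part file;
// "input.exr" is a hypothetical path; error paths abbreviated):
//
//   EXRVersion exr_version;
//   EXRHeader exr_header;
//   EXRImage exr_image;
//   const char *err = NULL;
//   if (ParseEXRVersionFromFile(&exr_version, "input.exr") != TINYEXR_SUCCESS)
//     return;
//   InitEXRHeader(&exr_header);
//   if (ParseEXRHeaderFromFile(&exr_header, &exr_version, "input.exr",
//                              &err) != TINYEXR_SUCCESS) {
//     FreeEXRErrorMessage(err);
//     return;
//   }
//   InitEXRImage(&exr_image);
//   if (LoadEXRImageFromFile(&exr_image, &exr_header, "input.exr",
//                            &err) != TINYEXR_SUCCESS) {
//     FreeEXRErrorMessage(err);
//     FreeEXRHeader(&exr_header);
//     return;
//   }
//   // exr_image.images[c] (or exr_image.tiles for tiled files) holds the
//   // pixel data of channel c.
//   FreeEXRImage(&exr_image);
//   FreeEXRHeader(&exr_header);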
// Loads single-part OpenEXR image from memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes on success.
// Returns zero and sets error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a file.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns the number of bytes on success.
// Returns zero and sets error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
depending on the type of data, and x64 vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
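A quick round-trip sketch (illustrative only, not part of miniz; the buffer
sizes are assumptions and error checks are omitted):
  const unsigned char src[] = "hello hello hello";
  unsigned char cmp[256], back[256];
  mz_ulong cmp_len = sizeof(cmp), back_len = sizeof(back);
  mz_compress(cmp, &cmp_len, src, (mz_ulong)sizeof(src));
  mz_uncompress(back, &back_len, cmp, cmp_len);
On success both calls return MZ_OK, back_len equals sizeof(src), and back
matches src.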
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
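For example (a sketch only; "data.zip" and "file.txt" are placeholders, and
note this embedded copy compiles the archive API's out via
MINIZ_NO_ARCHIVE_APIS below):
  size_t size = 0;
  void *p = mz_zip_extract_archive_file_to_heap("data.zip", "file.txt",
                                                &size, 0);
On success p points to the file's uncompressed bytes (release with
mz_free()); on failure p is NULL.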
- Archive file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which copies the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive. This is safe but requires a bunch of
temporary disk space or heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
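// For example (an illustrative sketch; chunk1/chunk2 and their lengths are
// assumptions), a checksum can be built up incrementally across buffers:
//   mz_ulong crc = MZ_CRC32_INIT;
//   crc = mz_crc32(crc, chunk1, chunk1_len);
//   crc = mz_crc32(crc, chunk2, chunk2_len); // same result as one full pass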
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
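// A custom-allocator sketch (illustrative only; a real pool allocator would
// replace the malloc/free bodies). Such functions can be assigned to an
// mz_stream's zalloc/zfree members before mz_deflateInit()/mz_inflateInit():
//   static void *my_zalloc(void *opaque, size_t items, size_t size) {
//     (void)opaque; return malloc(items * size);
//   }
//   static void my_zfree(void *opaque, void *address) {
//     (void)opaque; free(address);
//   }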
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
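// A minimal compression loop (illustrative sketch; in_buf/in_len/out_buf and
// the output handling are assumptions, and error paths are omitted):
//   mz_stream s; int status; memset(&s, 0, sizeof(s));
//   mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION);
//   s.next_in = in_buf; s.avail_in = (mz_uint)in_len;
//   do {
//     s.next_out = out_buf; s.avail_out = (mz_uint)sizeof(out_buf);
//     status = mz_deflate(&s, MZ_FINISH);
//     // write (sizeof(out_buf) - s.avail_out) bytes from out_buf here
//   } while (status == MZ_OK);
//   mz_deflateEnd(&s); // status is MZ_STREAM_END on success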
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
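// Sizing sketch (illustrative; pSrc/src_len are assumptions): allocate a
// worst-case buffer up front, then use the actual size returned on success.
//   mz_ulong cmp_len = mz_compressBound(src_len);
//   unsigned char *pCmp = (unsigned char *)malloc((size_t)cmp_len);
//   if (pCmp && (mz_compress(pCmp, &cmp_len, pSrc, src_len) == MZ_OK)) {
//     // cmp_len now holds the actual compressed size
//   }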
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
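// A refill-style decompression loop (illustrative sketch; read_more() is a
// hypothetical helper returning the number of bytes it placed in in_buf):
//   mz_stream s; int status; memset(&s, 0, sizeof(s));
//   mz_inflateInit(&s);
//   for (;;) {
//     if (!s.avail_in) { s.next_in = in_buf; s.avail_in = read_more(in_buf); }
//     s.next_out = out_buf; s.avail_out = (mz_uint)sizeof(out_buf);
//     status = mz_inflate(&s, MZ_NO_FLUSH);
//     // consume (sizeof(out_buf) - s.avail_out) output bytes here
//     if (status != MZ_OK) break; // MZ_STREAM_END on success
//   }
//   mz_inflateEnd(&s);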
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
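// For example (illustrative; pSrc/src_len are assumptions), a one-shot heap
// decompression of a zlib-wrapped stream:
//   size_t out_len = 0;
//   void *pOut = tinfl_decompress_mem_to_heap(pSrc, src_len, &out_len,
//                                             TINFL_FLAG_PARSE_ZLIB_HEADER);
//   if (pOut) { /* use out_len bytes at pOut */ mz_free(pOut); }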
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
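// A wrapping-buffer usage sketch (illustrative only; the input refilling and
// output flushing are assumptions), mirroring what mz_inflate() does
// internally for a raw deflate stream:
//   tinfl_decompressor inflator; tinfl_init(&inflator);
//   mz_uint8 dict[TINFL_LZ_DICT_SIZE]; size_t dict_ofs = 0;
//   for (;;) {
//     size_t in_size = avail_in, out_size = TINFL_LZ_DICT_SIZE - dict_ofs;
//     tinfl_status st = tinfl_decompress(&inflator, next_in, &in_size, dict,
//                                        dict + dict_ofs, &out_size,
//                                        TINFL_FLAG_HAS_MORE_INPUT);
//     next_in += in_size; avail_in -= in_size;
//     // flush out_size bytes at dict + dict_ofs, then wrap the write cursor
//     dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
//     if (st <= TINFL_STATUS_DONE) break; // done (0) or an error (< 0)
//   }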
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
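// For example (illustrative; pSrc/src_len are assumptions), compressing to a
// heap block with a zlib header and the default probe count:
//   size_t cmp_len = 0;
//   void *pCmp = tdefl_compress_mem_to_heap(
//       pSrc, src_len, &cmp_len,
//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   if (pCmp) { /* cmp_len compressed bytes */ mz_free(pCmp); }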
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
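// For example (illustrative sketch; pixels/w/h and the output handling are
// assumptions), encoding a tightly packed RGB image:
//   size_t png_len = 0;
//   void *pPng = tdefl_write_image_to_png_file_in_memory(pixels, w, h, 3,
//                                                        &png_len);
//   if (pPng) { /* write png_len bytes to disk */ mz_free(pPng); }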
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
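// A callback-driven sketch (illustrative; my_sink is a hypothetical consumer
// of each compressed chunk; note tdefl_compressor is a large struct, so
// heap-allocate it in real code rather than placing it on the stack):
//   static mz_bool my_sink(const void *pBuf, int len, void *pUser) {
//     (void)pUser; return fwrite(pBuf, 1, (size_t)len, stdout) == (size_t)len;
//   }
//   ...
//   tdefl_compressor *d = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
//   tdefl_init(d, my_sink, NULL,
//              TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   tdefl_compress_buffer(d, pIn, in_len, TDEFL_FINISH);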
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really over conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
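// Usage sketch (illustrative, not part of miniz): the usual one-shot pattern
// is to size the destination with mz_compressBound() before calling
// mz_compress(). The example_compress_alloc() helper is hypothetical and
// assumes <stdlib.h> for malloc()/free().
#if 0
static unsigned char *example_compress_alloc(const unsigned char *pSrc,
                                             mz_ulong src_len,
                                             mz_ulong *pOut_len) {
  mz_ulong bound = mz_compressBound(src_len);
  unsigned char *pDst = (unsigned char *)malloc((size_t)bound);
  if (!pDst) return NULL;
  *pOut_len = bound;  // in: capacity, out: actual compressed size
  if (mz_compress(pDst, pOut_len, pSrc, src_len) != MZ_OK) {
    free(pDst);
    return NULL;
  }
  return pDst;  // caller owns the buffer; *pOut_len holds the final size
}
#endif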
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
  // If flush != MZ_FINISH then we must assume there's more input on the way.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
      return MZ_DATA_ERROR;  // Stream is corrupted (there may still be some
                             // uncompressed data left in the output
                             // dictionary).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush == MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
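// Usage sketch (illustrative, not part of miniz): a streaming inflate loop
// that decompresses chunk-by-chunk into a fixed buffer. Both the
// example_stream_inflate() helper and the sink() callback are hypothetical.
#if 0
static int example_stream_inflate(const unsigned char *pSrc, size_t src_len,
                                  int (*sink)(const void *pBuf, size_t len)) {
  unsigned char out[16384];
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  stream.next_in = pSrc;
  stream.avail_in = (mz_uint32)src_len;
  if (mz_inflateInit(&stream) != MZ_OK) return -1;
  do {
    // Hand the inflator a fresh output window each iteration; MZ_OK means
    // more output is coming, MZ_STREAM_END means the stream is complete.
    stream.next_out = out;
    stream.avail_out = (mz_uint32)sizeof(out);
    status = mz_inflate(&stream, MZ_SYNC_FLUSH);
    if (!sink(out, sizeof(out) - stream.avail_out)) break;
  } while (status == MZ_OK);
  mz_inflateEnd(&stream);
  return (status == MZ_STREAM_END) ? 0 : -1;
}
#endif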
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64 bits: the stream counters are only 32 bits wide.
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from
// all compression APIs)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
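// The TINFL_CR_* macros above build a resumable "coroutine" out of a plain C
// switch (the same re-entry trick as Duff's device): r->m_state records where
// the decompressor last returned, and TINFL_CR_BEGIN's switch jumps straight
// back to the matching case label on the next call. A minimal standalone
// sketch of the pattern (illustrative only, not part of miniz):
#if 0
typedef struct { int state; int i; } coro;
static int next_value(coro *c) {  // yields 0, 1, 2, then -1 forever
  switch (c->state) {
    case 0:
      for (c->i = 0; c->i < 3; c->i++) {
        c->state = 1;
        return c->i;  // "yield": save state and leave
        case 1:;      // a later call resumes here, mid-loop
      }
  }
  return -1;  // finished
}
#endif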
// TODO: If the caller has indicated that there's no more input, and we
// attempt to read beyond the input buf, then something is wrong with the
// input because the inflator never reads ahead more than it needs to.
// Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this
// scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2. It reads just enough bytes
// from the input stream that are needed to decode the next Huffman code (and
// absolutely no more). It works by trying to fully decode a Huffman code
// using whatever bits are currently present in the bit buffer. If this
// fails, it reads another byte, and tries again until it succeeds or until
// the bit buffer contains >= 15 bits (deflate's maximum Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more
// complex than you would initially expect because the zlib API expects the
// decompressor to never read beyond the final byte of the deflate stream.
// (In other words, when this macro wants to read another byte from the
// input, it REALLY needs another byte in order to fully decode the next
// Huffman code.) Handling this properly is particularly important on raw
// deflate (non-zlib) streams, which aren't followed by a byte-aligned
// adler-32. The slow path is only executed at the very end of the input
// buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
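// Decoder table layout used by the two macros above: m_look_up[] is indexed
// by the low TINFL_FAST_LOOKUP_BITS of the bit buffer. A non-negative entry
// packs (code_length << 9) | symbol, so short codes decode with one table
// lookup; a negative entry is a bitwise-complemented index into m_tree[], a
// binary tree walked one bit at a time for codes longer than the fast lookup.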
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
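    // Update the running adler-32 over the bytes just written. Blocks of 5552
    // bytes are the longest for which s1/s2 cannot overflow 32 bits before
    // the modulo is applied (the standard zlib adler-32 optimization).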
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
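// Usage sketch (illustrative, not part of miniz): decompressing an in-memory
// zlib stream to a heap buffer with the helper above. The
// example_mem_to_heap() name is hypothetical.
#if 0
static void example_mem_to_heap(const void *pComp, size_t comp_len) {
  size_t out_len = 0;
  void *p = tinfl_decompress_mem_to_heap(pComp, comp_len, &out_len,
                                         TINFL_FLAG_PARSE_ZLIB_HEADER);
  if (p) {
    // ... use out_len bytes at p ...
    MZ_FREE(p);
  }
}
#endif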
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
// ------------------- Low-level Compression (independent from all
// decompression APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts the tdefl_sym_freq[] array by the 16-bit key m_key in two
// byte-wide passes (the high-byte pass is skipped when every key fits in 8
// bits). Returns a pointer to the sorted values (either pSyms0 or pSyms1).
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() was originally written by Alistair
// Moffat (alistair@cs.mu.oz.au) and Jyrki Katajainen (jyrki@diku.dk),
// November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
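// The routine above is the in-place Huffman code-length algorithm of Moffat
// and Katajainen: given symbols sorted by ascending frequency in A[].m_key,
// it overwrites m_key with each symbol's final code length using O(1) extra
// space, first building the tree in place and then converting parent links
// to depths.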
// Limits the canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
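// The length-limiting above balances the Kraft sum: `total` accumulates
// sum(num_codes[i] << (max_code_size - i)), which must equal
// 1 << max_code_size for a complete prefix code. Codes longer than the limit
// are clamped to max_code_size, then one clamped code at a time is traded
// for splitting a shorter code into two longer ones until the sum balances.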
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
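// The two RLE macros above emit the special symbols of deflate's code-length
// alphabet (RFC 1951, section 3.2.7): 16 repeats the previous code length
// 3-6 times, 17 encodes a run of 3-10 zeros, and 18 encodes a run of 11-138
// zeros, each followed by a short extra-bits count.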
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
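// Note: the 8/9/7/8 code lengths above are exactly the fixed literal/length
// Huffman code of RFC 1951, and the thirty-two 5-bit codes are its fixed
// distance code; TDEFL_PUT_BITS(1, 2) writes the "static Huffman" block type.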
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
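// Both tdefl_find_match() variants above walk the hash chain in d->m_next,
// examining up to d->m_max_probes[] candidates (the TDEFL_PROBE macro is
// unrolled three times per iteration). Each probe first screens the bytes
// that would extend the current best match before paying for a full compare.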
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
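// Worked example of the LZ code-buffer format produced by the two recorders
// above (derived from the code; illustrative only). Codes are grouped eight
// per flags byte; each new flag is shifted in at bit 7, so the oldest code of
// a full group ends up in bit 0. A literal appends one byte, a match three:
//
//   tdefl_record_literal(d, 'A');   // code bytes: 41         flag bit: 0
//   tdefl_record_match(d, 5, 100);  // code bytes: 02 63 00   flag bit: 1
//                                   // (len-3 = 2, dist-1 = 99, little-endian)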
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
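// Usage sketch (illustrative, not part of miniz): driving tdefl_compress()
// with fixed-size buffers. read_src()/write_dst() are hypothetical caller
// helpers; a real loop would also check write_dst() for errors.
//
//   tdefl_compressor comp;
//   mz_uint8 in_buf[4096], out_buf[4096];
//   size_t in_ofs = 0, in_len = 0;
//   tdefl_flush flush = TDEFL_NO_FLUSH;
//   tdefl_init(&comp, NULL, NULL, TDEFL_DEFAULT_MAX_PROBES);
//   for (;;) {
//     size_t in_bytes, out_bytes = sizeof(out_buf);
//     tdefl_status st;
//     if ((in_ofs == in_len) && (flush == TDEFL_NO_FLUSH)) {
//       in_ofs = 0;
//       in_len = read_src(in_buf, sizeof(in_buf));  // returns 0 at EOF
//       if (!in_len) flush = TDEFL_FINISH;
//     }
//     in_bytes = in_len - in_ofs;
//     st = tdefl_compress(&comp, in_buf + in_ofs, &in_bytes, out_buf,
//                         &out_bytes, flush);
//     in_ofs += in_bytes;  // in_bytes was updated to the amount consumed
//     write_dst(out_buf, out_bytes);
//     if (st != TDEFL_STATUS_OKAY) break;  // TDEFL_STATUS_DONE or an error
//   }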
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
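// Usage sketch (illustrative): streaming compressed output straight to a
// FILE* through the put-buf callback; my_file_putter is a hypothetical name.
//
//   static mz_bool my_file_putter(const void *pBuf, int len, void *pUser) {
//     return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
//   }
//   ...
//   FILE *pOut = fopen("data.deflate", "wb");
//   mz_bool ok = pOut && tdefl_compress_mem_to_output(
//                            pSrc, src_len, my_file_putter, pOut,
//                            TDEFL_DEFAULT_MAX_PROBES);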
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
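// Worked example of the growth policy above (illustrative): appending 200
// bytes to an empty expandable buffer doubles capacity 0 -> 128 -> 256, so a
// follow-up append of up to 56 more bytes needs no reallocation.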
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
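// Usage sketch (illustrative): one-shot compression to a heap buffer. The
// buffer comes from MZ_MALLOC/MZ_REALLOC, so with the default allocators it
// can be released with mz_free(). src/src_len are hypothetical.
//
//   size_t out_len = 0;
//   void *pDeflated = tdefl_compress_mem_to_heap(
//       src, src_len, &out_len,
//       TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   if (pDeflated) {
//     // ... out_len bytes of zlib-wrapped deflate data ...
//     mz_free(pDeflated);
//   }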
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
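// Example (illustrative): mapping zlib-style parameters onto tdefl flags.
// Level 9 selects 768 probes and, being > 3, no greedy parsing; positive
// window_bits requests a zlib header; the default strategy adds nothing:
//
//   mz_uint flags = tdefl_create_comp_flags_from_zip_params(
//       9, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
//   // flags == 768 | TDEFL_WRITE_ZLIB_HEADER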
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
// were #defined out).
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
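// Usage sketch (illustrative): render a 24bpp RGB image to an in-memory PNG
// and dump it to disk. pixels/width/height are hypothetical caller values.
//
//   size_t png_len = 0;
//   void *pPng = tdefl_write_image_to_png_file_in_memory(pixels, width,
//                                                        height, 3, &png_len);
//   if (pPng) {
//     FILE *pF = fopen("out.png", "wb");
//     if (pF) {
//       fwrite(pPng, 1, png_len, pF);
//       fclose(pF);
//     }
//     mz_free(pPng);
//   }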
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
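// Worked example of the DOS date/time bit layout decoded above (derived from
// the shifts; illustrative). dos_date packs (year-1980):7 | month:4 | day:5,
// dos_time packs hour:5 | minute:6 | (second/2):5, so 2016-03-15 12:34:56
// encodes as:
//   dos_date = ((2016 - 1980) << 9) | (3 << 5) | 15 = 0x486F
//   dos_time = (12 << 11) | (34 << 5) | (56 >> 1)   = 0x645C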
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject archives which are too small to hold even an
// end of central directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
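// Usage sketch (illustrative): open an archive from disk and list its
// contents. The mz_zip_archive struct must be zeroed before the first init.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     mz_uint i, n = mz_zip_reader_get_num_files(&zip);
//     for (i = 0; i < n; i++) {
//       mz_zip_archive_file_stat st;
//       if (mz_zip_reader_file_stat(&zip, i, &st))
//         printf("%s (%u bytes)\n", st.m_filename, (mz_uint)st.m_uncomp_size);
//     }
//     mz_zip_reader_end(&zip);
//   }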
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
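// Example (illustrative): the sorted-index binary search above is only taken
// when no comment filter is given and neither MZ_ZIP_FLAG_IGNORE_PATH nor
// MZ_ZIP_FLAG_CASE_SENSITIVE is set; otherwise the linear scan runs.
//
//   int idx = mz_zip_reader_locate_file(&zip, "dir/file.txt", NULL, 0);
//   if (idx >= 0) {
//     // found; idx can be passed to the extract/stat APIs
//   }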
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return NULL;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
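// Usage sketch (illustrative): extract one file into a freshly allocated
// buffer and, assuming the default allocators, release it with mz_free().
//
//   size_t size = 0;
//   void *pData =
//       mz_zip_reader_extract_file_to_heap(&zip, "dir/file.txt", &size, 0);
//   if (pData) {
//     // ... use size bytes at pData ...
//     mz_free(pData);
//   }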
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
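// A streaming sketch (illustrative, inside #if 0): the callback interface lets
// callers process an entry incrementally instead of buffering it whole. This
// hypothetical callback merely counts the decompressed bytes.
#if 0
static size_t example_count_callback(void *pOpaque, mz_uint64 ofs,
                                     const void *pBuf, size_t n) {
  (void)ofs;
  (void)pBuf;
  *(mz_uint64 *)pOpaque += n;
  return n;  // returning anything other than n aborts the extraction
}
static mz_uint64 example_count_bytes(mz_zip_archive *pZip, const char *pName) {
  mz_uint64 total = 0;
  mz_zip_reader_extract_file_to_callback(pZip, pName, example_count_callback,
                                         &total, 0);
  return total;
}
#endif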
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure user specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
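// A writer sketch (illustrative, inside #if 0): build a .zip image entirely in
// memory and take ownership of the finished buffer. With the default
// (malloc-based) allocator the buffer can be released with free().
#if 0
static void example_write_heap_archive(void) {
  mz_zip_archive zip;
  void *pBuf = NULL;
  size_t size = 0;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) return;
  mz_zip_writer_add_mem(&zip, "hello.txt", "hi", 2, MZ_DEFAULT_LEVEL);
  mz_zip_writer_finalize_heap_archive(&zip, &pBuf, &size);
  mz_zip_writer_end(&zip);
  // 'pBuf' now holds a complete archive image of 'size' bytes.
  free(pBuf);
}
#endif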
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // max size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;  // Unused when stdio support is compiled out.
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
  // Archive is being read via a user-provided read function - make sure the
  // user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                                   &central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
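// Worked example: with m_file_offset_alignment == 16 and m_archive_size == 10,
// n == 10 and the padding is (16 - 10) & 15 == 6, so the next local header
// lands on offset 16. If the size is already aligned, n == 0 and the final
// mask turns the would-be full block of padding into 0.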
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
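// Usage sketch for the one-call helper above (illustrative, inside #if 0):
// creates "logs.zip" if it does not exist, otherwise appends to it. The file
// and entry names are hypothetical.
#if 0
const char *msg = "entry body";
mz_zip_add_mem_to_archive_file_in_place("logs.zip", "note.txt", msg,
                                        strlen(msg), NULL, 0,
                                        MZ_BEST_COMPRESSION);
#endif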
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
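// Ownership note: the message is duplicated with strdup()/_strdup(), so the
// caller owns the returned string and must free() it (recent tinyexr releases
// also expose FreeEXRErrorMessage() for this purpose).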
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap4(float *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
float tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
    // Un-bias the single-precision exponent, then re-bias it for half
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
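// Worked example (illustrative, inside #if 0): round-tripping 1.0f. The value
// has Exponent == 127 and Mantissa == 0, so float_to_half_full() computes
// newexp == 127 - 127 + 15 == 15 and produces the half bit pattern 0x3C00;
// half_to_float() shifts that back up and re-biases by (127 - 15), recovering
// exactly 1.0f.
#if 0
FP32 f;
f.f = 1.0f;
FP16 h = float_to_half_full(f);  // h.u == 0x3C00
FP32 r = half_to_float(h);       // r.f == 1.0f
#endif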
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
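// On-disk layout of a single attribute, as parsed above:
//
//   name  : NUL-terminated string
//   type  : NUL-terminated string (e.g. "box2i", "chlist", "string")
//   size  : little-endian uint32 byte count of the value
//   value : 'size' bytes of type-specific data
//
// *marker_size receives the total number of bytes consumed from 'marker'.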
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(&outLen);
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
int min_x;
int min_y;
int max_x;
int max_y;
} Box2iInfo;
struct HeaderInfo {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
Box2iInfo data_window;
int line_order;
Box2iInfo display_window;
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tiled; // Non-zero if the part is tiled.
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
// required for multi-part or non-image files
std::string name;
// required for multi-part or non-image files
std::string type;
void clear() {
channels.clear();
attributes.clear();
data_window.min_x = 0;
data_window.min_y = 0;
data_window.max_x = 0;
data_window.max_y = 0;
line_order = 0;
display_window.min_x = 0;
display_window.min_y = 0;
display_window.max_x = 0;
display_window.max_y = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tiled = 0;
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
name.clear();
type.clear();
}
};
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(&info.pixel_type);
tinyexr::swap4(&info.x_sampling);
tinyexr::swap4(&info.y_sampling);
channels.push_back(info);
}
return true;
}
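// Each channel record decoded above occupies its name plus 16 fixed bytes:
//
//   name       : NUL-terminated string
//   pixel_type : int32 (UINT = 0, HALF = 1, FLOAT = 2)
//   pLinear    : uint8, followed by 3 reserved bytes
//   xSampling  : int32
//   ySampling  : int32
//
// A single 0x00 byte in place of the next name terminates the list.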
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(&pixel_type);
tinyexr::swap4(&x_sampling);
tinyexr::swap4(&y_sampling);
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocessing (interleave + delta predictor),
  // adapted from OpenEXR's ImfZipCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
  // Apply the EXR-specific postprocessing (delta predictor + de-interleave),
  // adapted from OpenEXR's ImfZipCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
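// The encoded stream alternates two token types:
//   run:      [n-1, value]         -- n equal bytes (n >= MIN_RUN_LENGTH)
//   literals: [-n, b0, ..., bn-1]  -- n bytes copied verbatim
// For illustration, "AAAABC" encodes as [3, 'A', -2, 'B', 'C'].
//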
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
// Compressible run
//
      *outWrite++ = static_cast<signed char>(runEnd - runStart - 1);
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply the EXR-specific preprocess (byte reorder + delta). Adapted from
  // OpenEXR's ImfRleCompressor.cpp
  //
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be (src_size * 3) / 2 at most.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
  //
  // Apply the EXR-specific postprocess (delta decode + byte reorder).
  // Adapted from OpenEXR's ImfRleCompressor.cpp
  //
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
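// The PIZ pipeline (see CompressPiz/DecompressPiz below): build a bitmap of
// the 16-bit values that actually occur, remap them to a dense range via a
// LUT, run the 2D Haar wavelet transform per channel, then Huffman-encode
// the result. Decompression runs the same stages in reverse.
//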
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
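// For illustration, wenc14(10, 4) produces l = (10 + 4) >> 1 = 7 (average)
// and h = 10 - 4 = 6 (difference); wdec14(7, 6) then reconstructs
// a = 7 + (6 & 1) + (6 >> 1) = 10 and b = 10 - 6 = 4 exactly.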
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
// Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
unsigned int len : 8; // code length 0
unsigned int lit : 24; // lit p size
unsigned int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
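// A table entry packs the code value in bits [63:6] and the code length in
// bits [5:0]; e.g. the 3-bit code 0b101 is stored as (0b101 << 6) | 3.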
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
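// outputBits()/getBits() share a bit-buffer convention: c accumulates bits
// MSB-first and lc counts how many of its low bits are valid; whole bytes
// are flushed to (or refilled from) the byte stream as lc crosses 8.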
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
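// For illustration, code lengths {s0:1, s1:2, s2:3, s3:3} yield canonical
// codes s0 = 1, s1 = 01, s2 = 000, s3 = 001; zero-padded to equal width
// (100, 010, 000, 001), shorter codes compare numerically higher, and equal
// lengths increase with symbol value.
//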
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure: bits [63:6] hold the code value, bits [5:0] the bit
//   length;
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
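// For illustration, the length sequence [3, 0, 0, 0, 0, 0, 0, 0, 0, 2]
// (eight zeroes between two codes) packs as: 3 (6 bits), 63 (6 bits),
// 8 - 6 = 2 (8 bits), 2 (6 bits).
//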
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
    int l = static_cast<int>(hufLength(hcode[im]));
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
  if (lc > 0) *p++ = static_cast<char>(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = static_cast<int>(l) - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
unsigned int *p = pl->p;
pl->p = new unsigned int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new unsigned int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
  // Output a run of runCount + 1 instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
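  //
  // E.g. with 6-bit codes for both sCode and runCode, the run form costs
  // 6 + 6 + 8 = 20 bits, versus 6 bits per explicit repeat.
  //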
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
     const int ni,              // i : number of input values (16-bit words)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
                      int no,  // i : expected output size (in 16-bit words)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
// std::cout << "lit = " << pl.lit << std::endl;
// std::cout << "rlc = " << rlc << std::endl;
// std::cout << "c = " << c << std::endl;
// std::cout << "lc = " << lc << std::endl;
// std::cout << "in = " << in << std::endl;
// std::cout << "out = " << out << std::endl;
// std::cout << "oe = " << oe << std::endl;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // Empty input is valid only when the expected output is also empty
    // (matches OpenEXR's behavior).
    return raw->size() == 0;
  }
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be runnable on this platform. Otherwise, fall back
  // to the original decoder.
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }
      bool ok = hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
      if (ok) {
        ok = hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                       static_cast<int>(raw->size()), raw->data());
      }
      if (!ok) {
        // Free any secondary entries allocated before the failure.
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
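// For illustration, if the pixel data contain only the values {0, 5, 9},
// the bitmap marks 5 and 9 (zero is implicit), the forward LUT maps
// 0 -> 0, 5 -> 1, 9 -> 2 (maxValue = 2), and the reverse LUT restores the
// originals after decoding. Shrinking the value range this way helps both
// the wavelet transform and the Huffman coder.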
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
// Assume `inSize` is multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header(4byte), then huff data. Initialize length header with zero,
// then later fill it by `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
if (size_t((ptr - inPtr) + length) > inLen) {
return false;
}
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer)) {
    return false;
  }
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
unsigned int precision;
unsigned int __pad0;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
unsigned int __pad1;
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0;
}
};
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes, std::string *err) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
if (attributes[i].size == 1) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
break;
} else {
if (err) {
(*err) +=
"zfpCompressionType attribute must be uchar(1 byte) type.\n";
}
return false;
}
}
}
if (!foundType) {
if (err) {
(*err) += "`zfpCompressionType` attribute not found.\n";
}
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionRate` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision =
            *(reinterpret_cast<unsigned int *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionPrecision` attribute not found.\n";
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
if (err) {
(*err) += "`zfpCompressionTolerance` attribute not found.\n";
}
} else {
if (err) {
(*err) += "Unknown value specified for `zfpCompressionType`.\n";
}
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size =
      size_t(dst_width) * size_t(dst_num_lines) * num_channels * sizeof(float);
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40). Compare byte sizes, so scale the
    // element count by sizeof(float).
    memcpy(dst, src, src_size);
    return true;
  }
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
                        const ZFPCompressionParam &param) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // Index with `width` (the decoded scanline stride), to match the
          // HALF and UINT branches above.
          const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
              v * pixel_data_size * static_cast<size_t>(width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
std::string e;
if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
int(num_attributes), &e)) {
// This code path should not be reachable.
assert(0);
return false;
}
    // Allocate a buffer of the original (uncompressed) data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
    assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
              // The address may not be aligned; use a byte-wise copy for
              // safety. See issue #76.
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
              // Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
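// Decodes the pixel data of a single tile. Tiles on the right/bottom edge of a
// level may extend past the data window, so the effective width and height are
// clamped to the level size and returned through `width`/`height`.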
static bool DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
// Here, data_width and data_height are the dimensions of the current (sub)level.
if (tile_size_x * tile_offset_x > data_width ||
tile_size_y * tile_offset_y > data_height) {
return false;
}
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
  // The tile's pixel data is laid out with the full tile size, so decode with
  // a tile-sized stride and the clamped width/height computed above.
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
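// Computes each channel's per-pixel byte offset (the sum of the sizes of the
// preceding channels) and the total bytes per pixel. Decoders multiply the
// per-pixel offset by the scanline width to locate a channel's sub-block,
// since EXR stores one scanline as channel 0's data, then channel 1's, etc.
// Worked example (hypothetical channel set): {R:FLOAT, G:HALF, B:UINT} gives
// offsets {0, 4, 6} and pixel_data_size 10.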
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
      // Unknown pixel type.
return false;
}
}
return true;
}
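// Allocates one image buffer per channel, each holding data_width * data_height
// elements sized according to the requested pixel type (HALF channels may be
// promoted to FLOAT storage). The caller owns and frees the returned buffers.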
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
#ifdef _WIN32
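// Converts a UTF-8 path to a wide string so that wide-char file APIs can be
// used on Windows.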
static inline std::wstring UTF8ToWchar(const std::string &str) {
int wstr_size =
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
std::wstring wstr(wstr_size, 0);
MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0],
(int)wstr.size());
return wstr;
}
#endif
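// Parses a single EXR header: reads attributes one by one until the
// terminating NUL byte, validates that all attributes required by the spec are
// present, and records the parsed header length in `info->header_len`. For
// multipart files an immediate NUL marks the end of the header list, reported
// through `empty_header`.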
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
bool has_name = false;
bool has_type = false;
info->name.clear();
info->type.clear();
info->data_window.min_x = 0;
info->data_window.min_y = 0;
info->data_window.max_x = 0;
info->data_window.max_y = 0;
info->line_order = 0; // @fixme
info->display_window.min_x = 0;
info->display_window.min_y = 0;
info->display_window.max_x = 0;
info->display_window.max_y = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tiled = 0;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
    // For multipart (and deep) files the tiled bit (9th bit) of the version
    // field is 0, so the "tiles" attribute must be accepted for them as well.
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
assert(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
if (err) {
(*err) = "Tile sizes were invalid.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
info->tiled = 1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->data_window.min_x);
tinyexr::swap4(&info->data_window.min_y);
tinyexr::swap4(&info->data_window.max_x);
tinyexr::swap4(&info->data_window.max_y);
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
tinyexr::swap4(&info->display_window.min_x);
tinyexr::swap4(&info->display_window.min_y);
tinyexr::swap4(&info->display_window.max_x);
tinyexr::swap4(&info->display_window.max_y);
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(&info->pixel_aspect_ratio);
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(&info->screen_window_center[0]);
tinyexr::swap4(&info->screen_window_center[1]);
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(&info->screen_window_width);
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(&info->chunk_count);
}
} else if (attr_name.compare("name") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->name.resize(len);
info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
has_name = true;
}
} else if (attr_name.compare("type") == 0) {
if (!data.empty() && data[0]) {
data.push_back(0);
size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
info->type.resize(len);
info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
has_type = true;
}
} else {
      // Custom attribute (up to TINYEXR_MAX_CUSTOM_ATTRIBUTES entries).
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (version->multipart || version->non_image) {
if (!has_name) {
ss_err << "\"name\" attribute not found in the header."
<< std::endl;
}
if (!has_type) {
ss_err << "\"type\" attribute not found in the header."
<< std::endl;
}
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window.min_x = info.display_window.min_x;
exr_header->display_window.min_y = info.display_window.min_y;
exr_header->display_window.max_x = info.display_window.max_x;
exr_header->display_window.max_y = info.display_window.max_y;
exr_header->data_window.min_x = info.data_window.min_x;
exr_header->data_window.min_y = info.data_window.min_y;
exr_header->data_window.max_x = info.data_window.max_x;
exr_header->data_window.max_y = info.data_window.max_y;
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tiled = info.tiled;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
EXRSetNameAttr(exr_header, info.name.c_str());
if (!info.type.empty()) {
if (info.type == "scanlineimage") {
assert(!exr_header->tiled);
} else if (info.type == "tiledimage") {
assert(exr_header->tiled);
} else if (info.type == "deeptile") {
exr_header->non_image = 1;
assert(exr_header->tiled);
} else if (info.type == "deepscanline") {
exr_header->non_image = 1;
assert(!exr_header->tiled);
} else {
assert(false);
}
}
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy pointer
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
struct OffsetData {
OffsetData() : num_x_levels(0), num_y_levels(0) {}
std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
int num_x_levels;
int num_y_levels;
};
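// Maps (lx, ly) level coordinates to an index into OffsetData::offsets; the
// layout must match the one produced by InitTileOffsets below.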
static int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
switch (tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
return 0;
case TINYEXR_TILE_MIPMAP_LEVELS:
return lx;
case TINYEXR_TILE_RIPMAP_LEVELS:
return lx + ly * num_x_levels;
default:
assert(false);
}
return 0;
}
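// Size of one dimension at mip/rip `level`: toplevel_size / 2^level, rounded
// up when the rounding mode is ROUND_UP, and clamped to at least 1. For
// example, toplevel_size 15 at level 2 yields 3 with ROUND_DOWN and 4 with
// ROUND_UP.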
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
assert(level >= 0);
int b = (int)(1u << (unsigned)level);
int level_size = toplevel_size / b;
if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size)
level_size += 1;
return std::max(level_size, 1);
}
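// Decodes every tile of one resolution level (level_x, level_y) into
// exr_image->tiles, in parallel when C++11 threads (or OpenMP) are available.
// Per-tile failures are accumulated in a bit flag so that all tiles are still
// attempted.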
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
const OffsetData& offset_data,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const unsigned char* head, const size_t size,
std::string* err) {
int num_channels = exr_header->num_channels;
int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
int num_tiles = num_x_tiles * num_y_tiles;
int err_code = TINYEXR_SUCCESS;
enum {
EF_SUCCESS = 0,
EF_INVALID_DATA = 1,
EF_INSUFFICIENT_DATA = 2,
EF_FAILED_TO_DECODE = 4
};
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
unsigned error_flag(EF_SUCCESS);
#endif
  // Although the spec says: "...the data window is subdivided into an array of
  // smaller rectangles...", the IlmImf library allows the tile dimensions to be
  // larger than (or equal to) the dimensions of the data window.
#if 0
if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
exr_image->level_x == 0 && exr_image->level_y == 0) {
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
#endif
exr_image->tiles = static_cast<EXRTile*>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]()
{
int tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
int x_tile = tile_idx % num_x_tiles;
int y_tile = tile_idx / num_x_tiles;
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
if (offset + sizeof(int) * 5 > size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
size_t data_size =
size_t(size - (offset + sizeof(int) * 5));
const unsigned char* data_ptr =
reinterpret_cast<const unsigned char*>(head + offset);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(&tile_coordinates[0]);
tinyexr::swap4(&tile_coordinates[1]);
tinyexr::swap4(&tile_coordinates[2]);
tinyexr::swap4(&tile_coordinates[3]);
if (tile_coordinates[2] != exr_image->level_x) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
if (tile_coordinates[3] != exr_image->level_y) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(&data_len);
if (data_len < 2 || size_t(data_len) > data_size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order,
exr_image->width, exr_image->height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// Failed to decode tile data.
error_flag |= EF_FAILED_TO_DECODE;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto& t : workers) {
t.join();
}
#else
} // parallel for
#endif
  // Set these fields even on error so that any memory already reserved for the
  // tiles can be freed by the caller.
exr_image->num_channels = num_channels;
exr_image->num_tiles = static_cast<int>(num_tiles);
if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
if (err) {
if (error_flag & EF_INSUFFICIENT_DATA) {
(*err) += "Insufficient data length.\n";
}
if (error_flag & EF_FAILED_TO_DECODE) {
(*err) += "Failed to decode tile data.\n";
}
}
return err_code;
}
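// Decodes all chunks of a part: for tiled parts it walks every mip/rip level
// and calls DecodeTiledLevel(); for scanline parts it decodes each scanline
// block (1, 16, or 32 lines depending on the compression type), multithreaded
// when available.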
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const OffsetData& offset_data,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param,
exr_header->custom_attributes,
int(exr_header->num_custom_attributes), err)) {
return TINYEXR_ERROR_INVALID_HEADER;
}
#endif
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_y < exr_header->data_window.min_y) {
if (err) {
(*err) += "Invalid data window.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  // Reject overly large data_width/data_height; such values indicate an
  // invalid header.
{
if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tiled) {
if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
<< ", "
<< "tile height = " << exr_header->tile_size_y << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
EXRImage* level_image = NULL;
for (int level = 0; level < offset_data.num_x_levels; ++level) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
level_image->level_x = level;
level_image->level_y = level;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
} else {
EXRImage* level_image = NULL;
for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
if (!level_image) {
level_image = exr_image;
} else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
level_image->level_x = level_x;
level_image->level_y = level_y;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
}
} else { // scanline format
    // Don't allow too large an image (256GB * pixel_data_size or more);
    // workaround for issue #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(&line_no);
tinyexr::swap4(&data_len);
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
            // Too large a value; assume the data is invalid.
            // 2 << 20 = 2097152 is a heuristic bound for sane scanline numbers.
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window.max_y + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window.min_y);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window.min_y;
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
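// Rebuilds a scanline offset table by walking the chunk stream: each chunk
// starts with 4 bytes of scanline number and 4 bytes of data size, so the next
// chunk is found by skipping data_len + 8 bytes. Used when the stored table
// contains non-positive (incomplete) offsets.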
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(&y);
tinyexr::swap4(&data_len);
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
static int FloorLog2(unsigned x) {
//
  // For x > 0, floorLog2(x) returns floor(log(x)/log(2)).
//
int y = 0;
while (x > 1) {
y += 1;
x >>= 1u;
}
return y;
}
static int CeilLog2(unsigned x) {
//
  // For x > 0, ceilLog2(x) returns ceil(log(x)/log(2)).
//
int y = 0;
int r = 0;
while (x > 1) {
if (x & 1)
r = 1;
y += 1;
x >>= 1u;
}
return y + r;
}
static int RoundLog2(int x, int tile_rounding_mode) {
return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) ? FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x));
}
static int CalculateNumXLevels(const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num = 0;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
num = 1;
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
int h = max_y - min_y + 1;
num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
num = RoundLog2(w, exr_header->tile_rounding_mode) + 1;
}
break;
default:
assert(false);
}
return num;
}
static int CalculateNumYLevels(const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num = 0;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
num = 1;
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
{
int w = max_x - min_x + 1;
int h = max_y - min_y + 1;
num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
int h = max_y - min_y + 1;
num = RoundLog2(h, exr_header->tile_rounding_mode) + 1;
}
break;
default:
assert(false);
}
return num;
}
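// numTiles[i] = ceil(level_size / tile size) for each level, i.e. the number
// of tiles needed to cover one dimension of level i.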
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
std::vector<int>& num_y_tiles,
const EXRHeader* exr_header) {
int min_x = exr_header->data_window.min_x;
int max_x = exr_header->data_window.max_x;
int min_y = exr_header->data_window.min_y;
int max_y = exr_header->data_window.max_y;
int num_x_levels = CalculateNumXLevels(exr_header);
int num_y_levels = CalculateNumYLevels(exr_header);
num_x_tiles.resize(num_x_levels);
num_y_tiles.resize(num_y_levels);
CalculateNumTiles(num_x_tiles,
max_x - min_x + 1,
exr_header->tile_size_x,
exr_header->tile_rounding_mode);
CalculateNumTiles(num_y_tiles,
max_y - min_y + 1,
exr_header->tile_size_y,
exr_header->tile_rounding_mode);
}
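// Scanline images have a single offset table; shape it as
// offsets[1][1][num_blocks].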
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
offset_data.offsets.resize(1);
offset_data.offsets[0].resize(1);
offset_data.offsets[0][0].resize(num_blocks);
offset_data.num_x_levels = 1;
offset_data.num_y_levels = 1;
}
// Return sum of tile blocks.
static int InitTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const std::vector<int>& num_x_tiles,
const std::vector<int>& num_y_tiles) {
int num_tile_blocks = 0;
offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
case TINYEXR_TILE_MIPMAP_LEVELS:
assert(offset_data.num_x_levels == offset_data.num_y_levels);
offset_data.offsets.resize(offset_data.num_x_levels);
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
offset_data.offsets[l].resize(num_y_tiles[l]);
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[l]);
num_tile_blocks += num_x_tiles[l];
}
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));
for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
int l = ly * offset_data.num_x_levels + lx;
offset_data.offsets[l].resize(num_y_tiles[ly]);
for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
num_tile_blocks += num_x_tiles[lx];
}
}
}
break;
default:
assert(false);
}
return num_tile_blocks;
}
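// Offsets are stored unsigned but read here as signed: a non-positive value
// marks a missing or corrupt table entry that needs reconstruction.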
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
static bool isValidTile(const EXRHeader* exr_header,
const OffsetData& offset_data,
int dx, int dy, int lx, int ly) {
if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
int num_x_levels = offset_data.num_x_levels;
int num_y_levels = offset_data.num_y_levels;
switch (exr_header->tile_level_mode) {
case TINYEXR_TILE_ONE_LEVEL:
if (lx == 0 &&
ly == 0 &&
offset_data.offsets.size() > 0 &&
offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_MIPMAP_LEVELS:
if (lx < num_x_levels &&
ly < num_y_levels &&
offset_data.offsets.size() > static_cast<size_t>(lx) &&
offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
break;
case TINYEXR_TILE_RIPMAP_LEVELS:
{
size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
if (lx < num_x_levels &&
ly < num_y_levels &&
(offset_data.offsets.size() > idx) &&
offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) {
return true;
}
}
break;
default:
return false;
}
return false;
}
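// Tile analogue of ReconstructLineOffsets(): walks the chunk stream, parsing
// each tile's header (optional part number; dx, dy, lx, ly; then either a data
// size or, for deep data, packed table/sample sizes) to recover the offsets.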
static void ReconstructTileOffsets(OffsetData& offset_data,
const EXRHeader* exr_header,
const unsigned char* head, const unsigned char* marker, const size_t /*size*/,
bool isMultiPartFile,
bool isDeep) {
int numXLevels = offset_data.num_x_levels;
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 tileOffset = marker - head;
if (isMultiPartFile) {
//int partNumber;
marker += sizeof(int);
}
int tileX;
memcpy(&tileX, marker, sizeof(int));
tinyexr::swap4(&tileX);
marker += sizeof(int);
int tileY;
memcpy(&tileY, marker, sizeof(int));
tinyexr::swap4(&tileY);
marker += sizeof(int);
int levelX;
memcpy(&levelX, marker, sizeof(int));
tinyexr::swap4(&levelX);
marker += sizeof(int);
int levelY;
memcpy(&levelY, marker, sizeof(int));
tinyexr::swap4(&levelY);
marker += sizeof(int);
if (isDeep) {
tinyexr::tinyexr_int64 packed_offset_table_size;
memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
marker += sizeof(tinyexr::tinyexr_int64);
tinyexr::tinyexr_int64 packed_sample_size;
memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
marker += sizeof(tinyexr::tinyexr_int64);
// next Int64 is unpacked sample size - skip that too
marker += packed_offset_table_size + packed_sample_size + 8;
} else {
int dataSize;
memcpy(&dataSize, marker, sizeof(int));
tinyexr::swap4(&dataSize);
marker += sizeof(int);
marker += dataSize;
}
if (!isValidTile(exr_header, offset_data,
tileX, tileY, levelX, levelY))
return;
int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
}
}
}
}
// Reads the offset table(s) from the chunk offset area. Note that `marker` is
// passed by reference and advanced past the table, i.e. it is also an output.
static int ReadOffsets(OffsetData& offset_data,
const unsigned char* head,
const unsigned char*& marker,
const size_t size,
const char** err) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offset_data.offsets[l][dy][dx] = offset;
}
}
}
return TINYEXR_SUCCESS;
}
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_x - exr_header->data_window.min_x ==
std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
exr_header->data_window.max_y - exr_header->data_window.min_y ==
std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  // Reject overly large data_width/data_height; such values indicate an
  // invalid header.
{
if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
if (exr_header->tiled) {
if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
OffsetData offset_data;
size_t num_blocks = 0;
// For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
// If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
if (exr_header->tiled) {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
if (exr_header->chunk_count > 0) {
if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
int ret = ReadOffsets(offset_data, head, marker, size, err);
if (ret != TINYEXR_SUCCESS) return ret;
if (IsAnyOffsetsAreInvalid(offset_data)) {
ReconstructTileOffsets(offset_data, exr_header,
head, marker, size,
exr_header->multipart, exr_header->non_image);
}
} else if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
InitSingleResolutionOffsets(offset_data, num_blocks);
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
InitSingleResolutionOffsets(offset_data, num_blocks);
}
if (!exr_header->tiled) {
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
    // If the line offsets are invalid, we try to reconstruct them.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
static void GetLayers(const EXRHeader &exr_header,
std::vector<std::string> &layer_names) {
  // Naive implementation:
  // group channels by layer by going over all channel names,
  // splitting at the last period and collecting the unique prefixes.
layer_names.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string full_name(exr_header.channels[c].name);
const size_t pos = full_name.find_last_of('.');
if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
full_name.erase(pos);
if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
layer_names.end())
layer_names.push_back(full_name);
}
}
}
struct LayerChannel {
explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
size_t index;
std::string name;
};
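// Collects the channels belonging to `layer_name`, stripping the layer prefix
// from each matching channel name. An empty layer_name selects every channel,
// reducing dotted names to their last component.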
static void ChannelsInLayer(const EXRHeader &exr_header,
const std::string layer_name,
std::vector<LayerChannel> &channels) {
channels.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(layer_name + '.');
if (pos == std::string::npos) continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
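// Minimal usage sketch for the RGBA loader below (hypothetical file name;
// error handling reduced to the essentials):
//
//   float *rgba = NULL;
//   int w, h;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) FreeEXRErrorMessage(err);
//   } else {
//     // ... use the w * h RGBA pixels in `rgba` ...
//     free(rgba);
//   }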
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
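// Usage sketch for LoadEXR() (caller side; "input.exr" is a placeholder):
//
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     // rgba now holds w * h RGBA float pixels, allocated with malloc(),
//     // so the caller releases it with free().
//     free(rgba);
//   }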
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code("
<< ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
  // TODO: Limit decoding to the channels of the selected layer instead of
  // loading the whole image.
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(
exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
} else if (ch.name == "G") {
idxG = int(ch.index);
} else if (ch.name == "B") {
idxB = int(ch.index);
} else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii = exr_image.tiles[it].offset_x *
static_cast<int>(exr_header.tile_size_x) +
i;
const int jj = exr_image.tiles[it].offset_y *
static_cast<int>(exr_header.tile_size_y) +
j;
const int idx = ii + jj * static_cast<int>(exr_image.width);
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val =
reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
              (*out_rgba)[4 * idx + 3] = 1.0f;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
        (*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
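// IsEXR() only checks that the file starts with a valid EXR version header,
// so it can serve as a cheap format probe, e.g.:
//
//   if (IsEXR("input.exr") == TINYEXR_SUCCESS) { /* treat it as EXR */ }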
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
exr_header->multipart = version->multipart ? 1 : 0;
exr_header->non_image = version->non_image ? 1 : 0;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
            (*out_rgba)[4 * idx + 3] = 1.0f;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
        (*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return the _wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
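// Sketch of the lower-level in-memory decoding sequence built from the entry
// points above (`data`/`data_size` stand for a caller-provided EXR byte
// buffer):
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//   const char *err = NULL;
//   if (ParseEXRVersionFromMemory(&version, data, data_size) ==
//           TINYEXR_SUCCESS &&
//       ParseEXRHeaderFromMemory(&header, &version, data, data_size, &err) ==
//           TINYEXR_SUCCESS &&
//       LoadEXRImageFromMemory(&image, &header, data, data_size, &err) ==
//           TINYEXR_SUCCESS) {
//     // image.images (scanline) or image.tiles (tiled) now hold the pixels.
//     FreeEXRImage(&image);
//   }
//   FreeEXRHeader(&header);
//   if (err) FreeEXRErrorMessage(err);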
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
const unsigned char* const* images,
const int* requested_pixel_types,
int compression_type,
int /*line_order*/,
int width, // for tiled : tile.width
int /*height*/, // for tiled : header.tile_size_y
int x_stride, // for tiled : header.tile_size_x
int line_no, // for tiled : 0
int num_lines, // for tiled : tile.height
size_t pixel_data_size,
const std::vector<ChannelInfo>& channels,
const std::vector<size_t>& channel_offset_list,
const void* compression_param = 0) // zfp compression param
{
size_t buf_size = static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
static_cast<size_t>(pixel_data_size);
//int last2bit = (buf_size & 3);
// buf_size must be multiple of four
//if(last2bit) buf_size += 4 - last2bit;
std::vector<unsigned char> buf(buf_size);
size_t start_y = static_cast<size_t>(line_no);
for (size_t c = 0; c < channels.size(); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(&f32.f);
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned short val = reinterpret_cast<const unsigned short * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] *
static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
float val = reinterpret_cast<const float * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < num_lines; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (int x = 0; x < width; x++) {
unsigned int val = reinterpret_cast<const unsigned int * const *>(
images)[c][(y + start_y) * x_stride + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
out_data.insert(out_data.end(), buf.begin(), buf.end());
} else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, width, num_lines);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
assert(0);
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
unsigned int data_len = outSize;
out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
(void)compression_param;
assert(0);
#endif
} else {
assert(0);
return false;
}
return true;
}
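// Note for callers of EncodePixelData(): the function appends only the
// (possibly compressed) pixel payload, so the caller reserves the chunk
// header in out_data first and patches it afterwards. For scanline images
// (see EncodeChunk() below) that looks like:
//
//   data_list[i].resize(2 * sizeof(int));    // [scanline y][payload size]
//   EncodePixelData(data_list[i], ...);      // appends the payload
//   memcpy(&data_list[i][0], &start_y, sizeof(int));
//   memcpy(&data_list[i][4], &data_len, sizeof(int));
//   // ...then byte-swap both header ints with swap4().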
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
const std::vector<tinyexr::ChannelInfo>& channels,
std::vector<std::vector<unsigned char> >& data_list,
size_t start_index, // for data_list
int num_x_tiles, int num_y_tiles,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const void* compression_param, // must be set if zfp compression is enabled
std::string* err) {
int num_tiles = num_x_tiles * num_y_tiles;
assert(num_tiles == level_image->num_tiles);
if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
level_image->level_x == 0 && level_image->level_y == 0) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = tile_count++) < num_tiles) {
#else
  // Use a signed int since some OpenMP compilers don't allow unsigned types
  // for `parallel for`.
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_tiles; i++) {
#endif
size_t tile_idx = static_cast<size_t>(i);
size_t data_idx = tile_idx + start_index;
int x_tile = i % num_x_tiles;
int y_tile = i / num_x_tiles;
EXRTile& tile = level_image->tiles[tile_idx];
const unsigned char* const* images =
static_cast<const unsigned char* const*>(tile.images);
data_list[data_idx].resize(5*sizeof(int));
size_t data_header_size = data_list[data_idx].size();
bool ret = EncodePixelData(data_list[data_idx],
images,
exr_header->requested_pixel_types,
exr_header->compression_type,
0, // increasing y
tile.width,
exr_header->tile_size_y,
exr_header->tile_size_x,
0,
tile.height,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue;
}
assert(data_list[data_idx].size() > data_header_size);
int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
//tileX, tileY, levelX, levelY // pixel_data_size(int)
memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode tile data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
return TINYEXR_SUCCESS;
}
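// Layout of the tile chunk header written above (five ints, byte-swapped to
// the file's little-endian order via swap4()):
//   [0]  tile x index     [4]  tile y index
//   [8]  level x          [12] level y
//   [16] payload byte size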
static int NumScanlines(int compression_type) {
int num_scanlines = 1;
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
return num_scanlines;
}
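// Example: with ZIP compression (16 scanlines per block) and an image height
// of 100, the writer emits (100 + 16 - 1) / 16 = 7 blocks; the last block
// carries the remaining 100 - 6 * 16 = 4 scanlines. This matches the
// chunk_count computation in SaveEXRNPartImageToMemory() below.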
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
const std::vector<ChannelInfo>& channels,
int num_blocks,
tinyexr_uint64 chunk_offset, // starting offset of current chunk
bool is_multipart,
OffsetData& offset_data, // output block offsets, must be initialized
std::vector<std::vector<unsigned char> >& data_list, // output
tinyexr_uint64& total_size, // output: ending offset of current chunk
std::string* err) {
int num_scanlines = NumScanlines(exr_header->compression_type);
data_list.resize(num_blocks);
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
{
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
}
const void* compression_param = 0;
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
  // Use the ZFP compression parameter from custom attributes (if such a
  // parameter exists).
{
std::string e;
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes, &e);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
compression_param = &zfp_compression_param;
}
#endif
tinyexr_uint64 offset = chunk_offset;
  // In multi-part files, a 4-byte part number precedes each chunk.
  tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
if (exr_image->tiles) {
const EXRImage* level_image = exr_image;
size_t block_idx = 0;
tinyexr::tinyexr_uint64 block_data_size = 0;
int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
if (!level_image) {
if (err) {
(*err) += "Invalid number of tiled levels for EncodeChunk\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
exr_header->tile_level_mode, offset_data.num_x_levels);
if (level_index_from_image != level_index) {
if (err) {
(*err) += "Incorrect level ordering in tiled image\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
std::string e;
int ret = EncodeTiledLevel(level_image,
exr_header,
channels,
data_list,
block_idx,
num_x_tiles,
num_y_tiles,
channel_offset_list,
pixel_data_size,
compression_param,
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty() && err) {
(*err) += e;
}
return ret;
}
for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
offset_data.offsets[level_index][j][i] = offset;
swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
offset += data_list[block_idx].size() + doffset;
block_data_size += data_list[block_idx].size();
++block_idx;
}
level_image = level_image->next_level;
}
assert(static_cast<int>(block_idx) == num_blocks);
total_size = offset;
} else { // scanlines
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
std::vector<std::thread> workers;
std::atomic<int> block_count(0);
int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int i = 0;
while ((i = block_count++) < num_blocks) {
#else
bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
#endif
int start_y = num_scanlines * i;
        int end_y = (std::min)(num_scanlines * (i + 1), exr_image->height);
        int num_lines = end_y - start_y;
const unsigned char* const* images =
static_cast<const unsigned char* const*>(exr_image->images);
data_list[i].resize(2*sizeof(int));
size_t data_header_size = data_list[i].size();
bool ret = EncodePixelData(data_list[i],
images,
exr_header->requested_pixel_types,
exr_header->compression_type,
0, // increasing y
exr_image->width,
exr_image->height,
exr_image->width,
start_y,
num_lines,
pixel_data_size,
channels,
channel_offset_list,
compression_param);
if (!ret) {
invalid_data = true;
continue; // "break" cannot be used with OpenMP
}
assert(data_list[i].size() > data_header_size);
int data_len = static_cast<int>(data_list[i].size() - data_header_size);
memcpy(&data_list[i][0], &start_y, sizeof(int));
memcpy(&data_list[i][4], &data_len, sizeof(int));
swap4(reinterpret_cast<int*>(&data_list[i][0]));
swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
if (invalid_data) {
if (err) {
(*err) += "Failed to encode scanline data.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size() + doffset;
}
    total_size = offset;
}
return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
unsigned char** memory_out, const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory_out == NULL) {
SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
{
for (unsigned int i = 0; i < num_parts; ++i) {
if (exr_headers[i]->compression_type < 0) {
SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#else
for (int c = 0; c < exr_header->num_channels; ++c) {
if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
}
}
std::vector<unsigned char> memory;
// Header
{
const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
memory.insert(memory.end(), header, header + 4);
}
// Version
// using value from the first header
int long_name = exr_headers[0]->long_name;
{
char marker[] = { 2, 0, 0, 0 };
/* @todo
if (exr_header->non_image) {
marker[1] |= 0x8;
}
*/
// tiled
if (num_parts == 1 && exr_images[0].tiles) {
marker[1] |= 0x2;
}
// long_name
if (long_name) {
marker[1] |= 0x4;
}
// multipart
if (num_parts > 1) {
marker[1] |= 0x10;
}
memory.insert(memory.end(), marker, marker + 4);
}
int total_chunk_count = 0;
std::vector<int> chunk_count(num_parts);
std::vector<OffsetData> offset_data(num_parts);
for (unsigned int i = 0; i < num_parts; ++i) {
if (!exr_images[i].tiles) {
int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
chunk_count[i] =
(exr_images[i].height + num_scanlines - 1) / num_scanlines;
InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
total_chunk_count += chunk_count[i];
} else {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
chunk_count[i] =
InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
total_chunk_count += chunk_count[i];
}
}
}
// Write attributes to memory buffer.
std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
{
std::set<std::string> partnames;
for (unsigned int i = 0; i < num_parts; ++i) {
//channels
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_headers[i]->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_headers[i]->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_headers[i]->channels[c].name);
channels[i].push_back(info);
}
tinyexr::WriteChannelInfo(data, channels[i]);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_headers[i]->compression_type;
swap4(&comp);
WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char*>(&comp), 1);
}
{
int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
swap4(&data[0]);
swap4(&data[1]);
swap4(&data[2]);
swap4(&data[3]);
WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
swap4(&data0[0]);
swap4(&data0[1]);
swap4(&data0[2]);
swap4(&data0[3]);
// Note: must be the same across parts (currently, using value from the first header)
WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
// Note: must be the same across parts
float aspectRatio = 1.0f;
swap4(&aspectRatio);
WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
}
{
float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
}
{
float w = 1.0f;
swap4(&w);
WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char*>(&w),
sizeof(float));
}
if (exr_images[i].tiles) {
unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
//unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
unsigned int datai[3] = { 0, 0, 0 };
unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
data[8] = tile_mode;
swap4(reinterpret_cast<unsigned int*>(&data[0]));
swap4(reinterpret_cast<unsigned int*>(&data[4]));
WriteAttributeToMemory(
&memory, "tiles", "tiledesc",
reinterpret_cast<const unsigned char*>(data), 9);
}
      // These attributes must be present in multi-part files, according to
      // the spec.
if (num_parts > 1) {
// name
{
size_t len = 0;
if ((len = strlen(exr_headers[i]->name)) > 0) {
partnames.insert(std::string(exr_headers[i]->name));
if (partnames.size() != i + 1) {
SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
return 0;
}
WriteAttributeToMemory(
&memory, "name", "string",
reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
static_cast<int>(len));
} else {
SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
return 0;
}
}
// type
{
const char* type = "scanlineimage";
if (exr_images[i].tiles) type = "tiledimage";
WriteAttributeToMemory(
&memory, "type", "string",
reinterpret_cast<const unsigned char*>(type),
static_cast<int>(strlen(type)));
}
// chunkCount
{
WriteAttributeToMemory(
&memory, "chunkCount", "int",
reinterpret_cast<const unsigned char*>(&chunk_count[i]),
4);
}
}
// Custom attributes
if (exr_headers[i]->num_custom_attributes > 0) {
for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_headers[i]->custom_attributes[j].name,
exr_headers[i]->custom_attributes[j].type,
reinterpret_cast<const unsigned char*>(
exr_headers[i]->custom_attributes[j].value),
exr_headers[i]->custom_attributes[j].size);
}
}
{ // end of header
memory.push_back(0);
}
}
}
if (num_parts > 1) {
// end of header list
memory.push_back(0);
}
tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
tinyexr_uint64 total_size = 0;
std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
for (unsigned int i = 0; i < num_parts; ++i) {
std::string e;
int ret = EncodeChunk(&exr_images[i], exr_headers[i],
channels[i],
chunk_count[i],
// starting offset of current chunk after part-number
chunk_offset,
num_parts > 1,
offset_data[i], // output: block offsets, must be initialized
data_lists[i], // output
total_size, // output
&e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return 0;
}
chunk_offset = total_size;
}
// Allocating required memory
if (total_size == 0) { // something went wrong
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
  (*memory_out) = static_cast<unsigned char*>(malloc(size_t(total_size)));
  if ((*memory_out) == NULL) {
    tinyexr::SetErrorMessage("Failed to allocate memory for the output", err);
    return 0;
  }
// Writing header
memcpy((*memory_out), &memory[0], memory.size());
unsigned char* memory_ptr = *memory_out + memory.size();
size_t sum = memory.size();
// Writing offset data for chunks
for (unsigned int i = 0; i < num_parts; ++i) {
if (exr_images[i].tiles) {
const EXRImage* level_image = &exr_images[i];
int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
for (int level_index = 0; level_index < num_levels; ++level_index) {
for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
sum += num_bytes;
assert(sum <= total_size);
memcpy(memory_ptr,
reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
num_bytes);
memory_ptr += num_bytes;
}
level_image = level_image->next_level;
}
} else {
size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
sum += num_bytes;
assert(sum <= total_size);
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
memory_ptr += num_bytes;
}
}
// Writing chunk data
for (unsigned int i = 0; i < num_parts; ++i) {
for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
if (num_parts > 1) {
sum += 4;
assert(sum <= total_size);
unsigned int part_number = i;
swap4(&part_number);
memcpy(memory_ptr, &part_number, 4);
memory_ptr += 4;
}
sum += data_lists[i][j].size();
assert(sum <= total_size);
memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
memory_ptr += data_lists[i][j].size();
}
}
assert(sum == total_size);
  return size_t(total_size); // OK
}
} // tinyexr
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
const EXRHeader* exr_header,
unsigned char** memory_out, const char** err) {
return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err);
}
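// Usage sketch for SaveEXRImageToMemory() (caller side): the function
// returns the encoded byte size and hands back a malloc'd buffer that the
// caller writes out and frees (`fp` is assumed to be a FILE* opened in
// "wb" mode):
//
//   unsigned char *mem = NULL;
//   const char *err = NULL;
//   size_t mem_size = SaveEXRImageToMemory(&image, &header, &mem, &err);
//   if (mem_size > 0) {
//     fwrite(mem, 1, mem_size, fp);
//     free(mem);
//   } else if (err) {
//     FreeEXRErrorMessage(err);
//   }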
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
unsigned char** memory_out, const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
memory_out == NULL) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
err);
return 0;
}
return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err);
}
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
const EXRHeader** exr_headers,
unsigned int num_parts,
const char* filename,
const char** err) {
if (exr_images == NULL || exr_headers == NULL || num_parts < 2) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "wb");
#endif
#else
fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err);
  if (mem_size == 0) {
    fclose(fp);
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
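// Multipart saving sketch: each part needs its own EXRHeader carrying a
// unique, non-empty `name` attribute (enforced in SaveEXRNPartImageToMemory()
// above). `header0`/`header1` and `image0`/`image1` stand for hypothetical,
// fully initialized parts:
//
//   EXRSetNameAttr(&header0, "left");
//   EXRSetNameAttr(&header1, "right");
//   const EXRHeader *headers[2] = { &header0, &header1 };
//   EXRImage images[2] = { image0, image1 };
//   const char *err = NULL;
//   int ret = SaveEXRMultipartImageToFile(images, headers, 2, "out.exr",
//                                         &err);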
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
    // ver 2.0, scanline, deep bit on (0x800)
    // must be [2, 8, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(&dx);
tinyexr::swap4(&dy);
tinyexr::swap4(&dw);
tinyexr::swap4(&dh);
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(&x);
tinyexr::swap4(&y);
tinyexr::swap4(&w);
tinyexr::swap4(&h);
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
}
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(&line_no);
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
        return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
          return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
    //
    // Alloc memory: pixel data is stored as image[channels][pixel_samples].
    //
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->next_level = NULL;
exr_image->level_x = 0;
exr_image->level_y = 0;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
return;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
EXRSetNameAttr(exr_header, NULL);
return TINYEXR_SUCCESS;
}
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
if (exr_header == NULL) {
return;
}
memset(exr_header->name, 0, 256);
if (name != NULL) {
size_t len = std::min(strlen(name), (size_t)255);
if (len) {
memcpy(exr_header->name, name, len);
}
}
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if(exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while((level_image = level_image->next_level)) ++levels;
return levels;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_image->next_level) {
FreeEXRImage(exr_image->next_level);
delete exr_image->next_level;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
    // move to the next header; shrink the remaining-bytes bound that is
    // passed to ParseEXRHeader() along with the advancing marker.
    marker += info.header_len;
    marker_size -= info.header_len;
  }
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
memset(exr_header, 0, sizeof(EXRHeader));
ConvertHeader(exr_header, infos[i]);
exr_header->multipart = exr_version->multipart ? 1 : 0;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (err != 0) {
    // TODO(syoyo): return the _wfopen_s error code
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
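// Usage sketch (illustrative, not part of the library): probe a file's
// version flags before choosing a load path. "input.exr" is a placeholder
// name.
//
//   EXRVersion exr_version;
//   int ret = ParseEXRVersionFromFile(&exr_version, "input.exr");
//   if (ret == TINYEXR_SUCCESS) {
//     if (exr_version.multipart) {
//       // ParseEXRMultipartHeaderFromFile() +
//       // LoadEXRMultipartImageFromMemory() path
//     } else if (exr_version.non_image) {
//       // deep-image path
//     }
//   }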
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
  // NOTE 1:
  // In a multipart image, each chunk is preceded by a 'part number':
  //   4 bytes  : part number
  //   4+ bytes : chunk data
  //
  // NOTE 2:
  // The EXR spec says 'part number' is an 'unsigned long', but it is actually
  // a 4-byte 'unsigned int' in the OpenEXR implementation.
  // http://www.openexr.com/openexrfilelayout.pdf
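  // Worked example (sketch): a chunk belonging to part #2 starts with the
  // little-endian bytes
  //   02 00 00 00 | <scanline/tile chunk data ...>
  // which is why every offset read below is advanced by +4 to point past the
  // part number.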
// Load chunk offset table.
std::vector<tinyexr::OffsetData> chunk_offset_table_list;
chunk_offset_table_list.reserve(num_parts);
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
} else {
{
std::vector<int> num_x_tiles, num_y_tiles;
tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
if (num_blocks != exr_headers[i]->chunk_count) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number'
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
}
}
}
}
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
    // First, check that the 'part number' is identical to 'i'
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
const unsigned char *part_number_addr =
memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
errno_t errcode =
_wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
if (errcode != 0) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
// Unknown compiler
fp = fopen(filename, "rb");
#endif
#else
fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || (components == 3) || (components == 4)) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small image.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
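// Usage sketch (illustrative): save a 4-component interleaved RGBA float
// buffer as a half-float EXR. `rgba', `width' and `height' are placeholders.
//
//   const char *err = NULL;
//   int ret = SaveEXR(rgba, width, height, 4, /* save_as_fp16 */ 1,
//                     "out.exr", &err);
//   if (ret != TINYEXR_SUCCESS && err != NULL) {
//     fprintf(stderr, "%s\n", err);
//     FreeEXRErrorMessage(err);
//   }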
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
ICP.h | ///////////////////////////////////////////////////////////////////////////////
/// "Sparse Iterative Closest Point"
/// by Sofien Bouaziz, Andrea Tagliasacchi, Mark Pauly
/// Copyright (C) 2013 LGG, EPFL
/// -- modified version --
///////////////////////////////////////////////////////////////////////////////
/// 1) This file contains different implementations of the ICP algorithm.
/// 2) This code requires EIGEN and NANOFLANN.
/// 3) If OPENMP is activated some part of the code will be parallelized.
/// 4) This code is currently designed for 3D registration only
/// 5) Two main input types are Eigen::Matrix3Xd or Eigen::Map<Eigen::Matrix3Xd>
///////////////////////////////////////////////////////////////////////////////
/// namespace nanoflann: NANOFLANN KD-tree adaptor for EIGEN
/// namespace RigidMotionEstimator: functions to compute the rigid motion
/// namespace SICP: sparse ICP implementation
/// namespace ICP: reweighted ICP implementation
///////////////////////////////////////////////////////////////////////////////
// Note for Online Surface Reconstruction:
// The original implementation has been adapted slightly to conform to OSR data structures.
#ifndef ICP_H
#define ICP_H
#include <Eigen/Dense>
#include <vector>
#include "osr/INeighborQueryable.h"
#include <nsessentials/util/TimedBlock.h>
#include "osr/common.h"
#include <iostream>
/// Compute the rigid motion for point-to-point and point-to-plane distances
namespace RigidMotionEstimator {
template <typename Derived1, typename Derived2, typename Derived3>
Eigen::Affine3f point_to_point(const Eigen::MatrixBase<Derived1>& X,
const Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& w,
const Eigen::Affine3f& initialTransform)
{
/// Normalize weight vector
Eigen::VectorXd w_normalized = w.template cast<double>()/(double)w.sum();
/// De-mean
Eigen::Vector3d X_mean, Y_mean;
for(int i=0; i<3; ++i)
{
X_mean(i) = (X.row(i).template cast<double>().array()*w_normalized.transpose().array()).sum();
Y_mean(i) = (Y.row(i).template cast<double>().array()*w_normalized.transpose().array()).sum();
}
X_mean = initialTransform.cast<double>() * X_mean;
/// Compute transformation
Eigen::Affine3d transformation = initialTransform.cast<double>();
transformation.pretranslate(-X_mean.cast<double>());
Eigen::Matrix3d sigma; sigma.setConstant(0.0);
for (int i = 0; i < X.cols(); ++i)
{
sigma += (initialTransform.template cast<double>() * X.col(i).template cast<double>() - X_mean) * (double)w_normalized.coeff(i) * (Y.col(i).template cast<double>() - Y_mean).transpose();
}
Eigen::JacobiSVD<Eigen::Matrix3d> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Affine3d rotation;
rotation.setIdentity();
if(svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) {
Eigen::Vector3d S = Eigen::Vector3d::Ones(); S(2) = -1.0;
rotation.linear().noalias() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose();
} else {
rotation.linear().noalias() = svd.matrixV()*svd.matrixU().transpose();
}
transformation = rotation * transformation;
transformation.pretranslate(Y_mean);
/// Return transform
return transformation.cast<float>();
}
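/// The implementation above is a weighted Kabsch/Umeyama alignment: with the
/// weighted means x_bar, y_bar and the covariance
///   sigma = sum_i w_i (x_i - x_bar) (y_i - y_bar)^T = U S V^T,
/// the optimal rotation is R = V diag(1, 1, det(V U^T)) U^T (the diag term is
/// the S(2) = -1 reflection guard above) and the translation is
/// t = y_bar - R x_bar.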
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
template <typename Derived1, typename Derived2>
inline Eigen::Affine3f point_to_point(const Eigen::MatrixBase<Derived1>& X,
const Eigen::MatrixBase<Derived2>& Y,
const Eigen::Affine3f& initialTransform)
{
return point_to_point(X, Y, Eigen::VectorXf::Ones(X.cols()), initialTransform);
}
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Confidence weights
/// @param Right hand side
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4, typename Derived5>
Eigen::Affine3f point_to_plane(const Eigen::MatrixBase<Derived1>& X,
const Eigen::MatrixBase<Derived2>& Y,
const Eigen::MatrixBase<Derived3>& N,
const Eigen::MatrixBase<Derived4>& w,
const Eigen::MatrixBase<Derived5>& u,
const Eigen::Affine3f& initialTransform) {
typedef Eigen::Matrix<double, 6, 6> Matrix66;
typedef Eigen::Matrix<double, 6, 1> Vector6;
typedef Eigen::Block<Matrix66, 3, 3> Block33;
/// Normalize weight vector
Eigen::VectorXd w_normalized = w.template cast<double>()/(double)w.sum();
/// De-mean
Eigen::Vector3d X_mean;
for(int i=0; i<3; ++i)
X_mean(i) = (X.row(i).template cast<double>().array()*w_normalized.transpose().array()).sum();
X_mean = initialTransform.cast<double>() * X_mean;
Eigen::Affine3d transformation = initialTransform.cast<double>();
transformation.pretranslate(-X_mean);
/// Prepare LHS and RHS
Matrix66 LHS = Matrix66::Zero();
Vector6 RHS = Vector6::Zero();
Block33 TL = LHS.topLeftCorner<3,3>();
Block33 TR = LHS.topRightCorner<3,3>();
Block33 BR = LHS.bottomRightCorner<3,3>();
Eigen::MatrixXd C = Eigen::MatrixXd::Zero(3,X.cols());
#pragma omp parallel
{
#pragma omp for
for(int i=0; i<X.cols(); i++) {
C.col(i) = ((initialTransform * X.col(i)).template cast<double>() - X_mean).cross(N.col(i).template cast<double>());
}
#pragma omp sections nowait
{
#pragma omp section
for(int i=0; i<X.cols(); i++) TL.selfadjointView<Eigen::Upper>().rankUpdate(C.col(i), w(i));
#pragma omp section
for(int i=0; i<X.cols(); i++) TR += (C.col(i) * N.col(i).template cast<double>().transpose())*w(i);
#pragma omp section
for(int i=0; i<X.cols(); i++) BR.selfadjointView<Eigen::Upper>().rankUpdate(N.col(i).template cast<double>(), w(i));
#pragma omp section
for(int i=0; i<C.cols(); i++) {
double dist_to_plane = -(((initialTransform * X.col(i)).template cast<double>() - Y.col(i).template cast<double>()).dot(N.col(i).template cast<double>()) - u(i)) * w(i);
RHS.head<3>() += C.col(i)*dist_to_plane;
RHS.tail<3>() += N.col(i).template cast<double>() * dist_to_plane;
}
}
}
LHS = LHS.selfadjointView<Eigen::Upper>();
/// Compute transformation
Eigen::LDLT<Matrix66> ldlt(LHS);
RHS = ldlt.solve(RHS);
transformation = Eigen::AngleAxisd(RHS(0), Eigen::Vector3d::UnitX()) *
Eigen::AngleAxisd(RHS(1), Eigen::Vector3d::UnitY()) *
Eigen::AngleAxisd(RHS(2), Eigen::Vector3d::UnitZ()) * transformation;
transformation.pretranslate(RHS.tail<3>());
transformation.pretranslate(X_mean);
return transformation.cast<float>();
}
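/// The solver above uses the standard small-angle linearization of the
/// point-to-plane objective: with c_i = (x_i - x_bar) x n_i and residual
/// d_i = -((x_i - y_i) . n_i - u_i), it assembles the 6x6 normal equations
///   [ sum w_i c_i c_i^T   sum w_i c_i n_i^T ] [ r ]   [ sum w_i d_i c_i ]
///   [      (symmetric)    sum w_i n_i n_i^T ] [ t ] = [ sum w_i d_i n_i ]
/// and applies the rotation part r as three small Euler-angle rotations
/// (a sketch of the math matching the code, not an exact derivation).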
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Confidence weights
template <typename Derived1, typename Derived2, typename Derived3, typename Derived4>
inline Eigen::Affine3f point_to_plane(const Eigen::MatrixBase<Derived1>& X,
const Eigen::MatrixBase<Derived2>& Yp,
const Eigen::MatrixBase<Derived3>& Yn,
const Eigen::MatrixBase<Derived4>& w,
const Eigen::Affine3f& initialTransform) {
return point_to_plane(X, Yp, Yn, w, Eigen::VectorXf::Zero(X.cols()), initialTransform);
}
}
///////////////////////////////////////////////////////////////////////////////
/// ICP implementation using ADMM/ALM/Penalty method
namespace SICP {
struct Parameters {
bool use_penalty = false; /// if use_penalty then penalty method else ADMM or ALM (see max_inner)
float p = 1.0f; /// p norm
float mu = 10.0f; /// penalty weight
float alpha = 1.2f; /// penalty increase factor
float max_mu = 1e5f; /// max penalty
int max_icp = 100; /// max ICP iteration
int max_outer = 100; /// max outer iteration
int max_inner = 1; /// max inner iteration. If max_inner=1 then ADMM else ALM
float stop = 1e-5f; /// stopping criteria
bool print_icpn = false; /// (debug) print ICP iteration
};
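    /// Usage sketch (illustrative; X, N and the queryable target Y are
    /// placeholders supplied by the caller):
    ///   SICP::Parameters par;
    ///   par.p = 0.5f;          // sparser residual norm than the default p = 1
    ///   par.print_icpn = true;
    ///   Eigen::Affine3f T = SICP::point_to_plane(X, N, Y, par);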
/// Shrinkage operator (Automatic loop unrolling using template)
template<unsigned int I>
inline float shrinkage(float mu, float n, float p, float s) {
return shrinkage<I-1>(mu, n, p, 1.0f - (p/mu)*std::pow(n, p-2.0f)*std::pow(s, p-1.0f));
}
template<>
inline float shrinkage<0>(float, float, float, float s) {return s;}
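    /// The recursion above is the fixed-point iteration for the proximal
    /// operator of the p-norm penalty used by Sparse ICP: starting from s, it
    /// applies
    ///   s <- 1 - (p/mu) * n^(p-2) * s^(p-1)
    /// a compile-time-fixed number of times (I iterations, unrolled via the
    /// template parameter); shrink<I>() below uses it to solve the
    /// Z-subproblem of the ADMM splitting.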
/// 3D Shrinkage for point-to-point
template<unsigned int I>
inline void shrink(Eigen::Matrix3Xf& dirField, float mu, float p) {
float Ba = std::pow((2.0f/mu)*(1.0f-p), 1.0f/(2.0f-p));
float ha = Ba + (p/mu)*std::pow(Ba, p-1.0f);
#pragma omp parallel for
for(int i=0; i<dirField.cols(); ++i) {
float n = dirField.col(i).norm();
            float w = 0.0f;
if(n > ha) w = shrinkage<I>(mu, n, p, (Ba/n + 1.0f)/2.0f);
dirField.col(i) *= w;
}
}
/// 1D Shrinkage for point-to-plane
template<unsigned int I>
inline void shrink(Eigen::VectorXf& y, float mu, float p) {
        float Ba = std::pow((2.0f/mu)*(1.0f-p), 1.0f/(2.0f-p));
        float ha = Ba + (p/mu)*std::pow(Ba, p-1.0f);
#pragma omp parallel for
for(int i=0; i<y.rows(); ++i) {
float n = std::abs(y(i));
            float s = 0.0f;
            if(n > ha) s = shrinkage<I>(mu, n, p, (Ba/n + 1.0f)/2.0f);
y(i) *= s;
}
}
/// Sparse ICP with point to point
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Parameters
template <typename Index>
inline Eigen::Affine3f point_to_point(const osr::Matrix3Xf& X, const osr::Matrix3Xf& N,
const osr::IPointQueryable<Index>& Y,
Parameters par = Parameters())
{
/// Buffers
Eigen::Matrix3Xf dirField = Eigen::Matrix3Xf::Zero(3, X.cols());
Eigen::Matrix3Xf Z = Eigen::Matrix3Xf::Zero(3, X.cols());
Eigen::Matrix3Xf C = Eigen::Matrix3Xf::Zero(3, X.cols());
Eigen::Affine3f transform = Eigen::Affine3f::Identity();
Eigen::Matrix3Xf Xo1 = X;
Eigen::Matrix3Xf Xo2 = X;
Eigen::VectorXf w = Eigen::VectorXf::Ones(X.cols());
/// ICP
for (int icp = 0; icp<par.max_icp; ++icp) {
if (par.print_icpn) std::cout << "Iteration #" << icp << "/" << par.max_icp << std::endl;
/// Find closest point
{
nse::util::TimedBlock b("Finding correspondences ..");
#pragma omp parallel for
for (int i = 0; i < X.cols(); ++i)
{
Eigen::Vector3f px = transform * X.col(i);
auto idx = Y.findClosestCompatiblePoint(px, transform.linear() * N.col(i));
if (Y.isIndexValid(idx))
{
dirField.col(i) = Y.neighborP(idx);
                    w(i) = 1;
}
else
{
dirField.col(i).setZero();
w(i) = 0;
}
}
}
//Compute rotation and translation
double mu = par.mu;
for (int outer = 0; outer<par.max_outer; ++outer) {
double dual = 0.0;
for (int inner = 0; inner<par.max_inner; ++inner) {
//Z update (shrinkage)
Z = transform * X - dirField + C / mu;
shrink<3>(Z, mu, par.p);
// Rotation and translation update
Eigen::Matrix3Xf U = dirField + Z - C / mu;
transform = RigidMotionEstimator::point_to_point(X, U, w, transform);
// Stopping criteria
dual = (transform * X - Xo1).colwise().norm().maxCoeff();
Xo1 = transform * X;
if (dual < par.stop) break;
}
// C update (lagrange multipliers)
Eigen::Matrix3Xf P = transform * X - dirField - Z;
if (!par.use_penalty) C.noalias() += mu*P;
// mu update (penalty)
if (mu < par.max_mu) mu *= par.alpha;
// Stopping criteria
double primal = P.colwise().norm().maxCoeff();
if (primal < par.stop && dual < par.stop) break;
}
//transform = RigidMotionEstimator::point_to_point(X.V(), dirField, w, transform);
/// Stopping criteria
double stop = (transform * X - Xo2).colwise().norm().maxCoeff();
Xo2 = transform * X;
if (stop < par.stop) break;
}
return transform;
}
/// Sparse ICP with point to plane
/// @param Source (one 3D point per column)
/// @param Target (one 3D point per column)
/// @param Target normals (one 3D normal per column)
/// @param Parameters
template <typename Index>
Eigen::Affine3f point_to_plane(const osr::Matrix3Xf& X, const osr::Matrix3Xf& N,
const osr::IPointQueryable<Index>& Y,
Parameters par = Parameters())
{
/// Buffers
Eigen::Matrix3Xf Qp = Eigen::Matrix3Xf::Zero(3, X.cols());
Eigen::Matrix3Xf Qn = Eigen::Matrix3Xf::Zero(3, X.cols());
Eigen::VectorXf Z = Eigen::VectorXf::Zero(X.cols());
Eigen::VectorXf C = Eigen::VectorXf::Zero(X.cols());
Eigen::Affine3f transform = Eigen::Affine3f::Identity();
Eigen::Matrix3Xf Xo1 = X;
Eigen::Matrix3Xf Xo2 = X;
Eigen::VectorXf w = Eigen::VectorXf::Ones(X.cols());
/// ICP
for(int icp=0; icp<par.max_icp; ++icp) {
if(par.print_icpn) std::cout << "Iteration #" << icp << "/" << par.max_icp << std::endl;
/// Find closest point
{
nse::util::TimedBlock b("Finding correspondences ..");
float sumWeight = 0;
#pragma omp parallel for reduction(+ : sumWeight)
for (int i = 0; i < X.cols(); ++i)
{
Eigen::Vector3f px = transform * X.col(i);
auto idx = Y.findClosestCompatiblePoint(px, transform.linear() * N.col(i));
if (Y.isIndexValid(idx))
{
Qp.col(i) = Y.neighborP(idx);
Qn.col(i) = Y.neighborN(idx);
w(i) = 1;
sumWeight += w(i);
}
else
{
Qp.col(i).setZero();
Qn.col(i) = Eigen::Vector3f::UnitX();
w(i) = 0;
}
}
if (sumWeight < 3)
return transform;
}
/// Compute rotation and translation
float mu = par.mu;
for(int outer=0; outer<par.max_outer; ++outer) {
float dual = 0.0;
for(int inner=0; inner<par.max_inner; ++inner) {
/// Z update (shrinkage)
Eigen::Matrix3Xf transformedX = transform * X;
Z = (Qn.array()*(transformedX - Qp).array()).colwise().sum().transpose()+C.array()/mu;
shrink<3>(Z, mu, par.p);
/// Rotation and translation update
Eigen::VectorXf U = Z-C/mu;
transform = RigidMotionEstimator::point_to_plane(X, Qp, Qn, w, U, transform);
/// Stopping criteria
dual = (transform * X - Xo1).colwise().norm().maxCoeff();
Xo1 = transform * X;
if(dual < par.stop) break;
}
/// C update (lagrange multipliers)
Eigen::VectorXf P = ((transform.linear() * Qn).array()*(transform * X - Qp).array()).colwise().sum().transpose()-Z.array();
if(!par.use_penalty) C.noalias() += mu*P;
/// mu update (penalty)
if(mu < par.max_mu) mu *= par.alpha;
/// Stopping criteria
float primal = P.array().abs().maxCoeff();
if(primal < par.stop && dual < par.stop) break;
}
/// Stopping criteria
float stop = (transform * X - Xo2).colwise().norm().maxCoeff();
Xo2 = transform * X;
if(stop < par.stop) break;
}
return transform;
}
}
#endif
|
GB_binop__max_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_uint8
// A.*B function (eWiseMult): GB_AemultB__max_uint8
// A*D function (colscale): GB_AxD__max_uint8
// D*A function (rowscale): GB_DxB__max_uint8
// C+=B function (dense accum): GB_Cdense_accumB__max_uint8
// C+=b function (dense accum): GB_Cdense_accumb__max_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_uint8
// C=scalar+B GB_bind1st__max_uint8
// C=scalar+B' GB_bind1st_tran__max_uint8
// C=A+scalar GB_bind2nd__max_uint8
// C=A'+scalar GB_bind2nd_tran__max_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
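// Example expansion (illustrative):
//   GB_BINOP (GB_CX (p), aij, bij, i, j)
// becomes
//   Cx [p] = GB_IMAX (aij, bij) ;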
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__max_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__max_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__max_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__max_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__max_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__max_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__max_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__max_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__max_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__max_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB_bind1st_tran__max_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__max_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
copyin-2.c | /* { dg-do run } */
/* { dg-options "-O2" } */
/* { dg-require-effective-target tls_runtime } */
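/* This test checks that "copyin (thr)" initializes each thread's
   threadprivate copy of "thr" from the primary thread's value, and that the
   per-thread values written inside the first parallel region persist into
   the second one. */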
#include <omp.h>
#include <stdlib.h>
struct { int t; char buf[64]; } thr = { 32, "" };
#pragma omp threadprivate (thr)
int
main (void)
{
int l = 0;
omp_set_dynamic (0);
omp_set_num_threads (6);
#pragma omp parallel copyin (thr) reduction (||:l)
{
l = thr.t != 32;
thr.t = omp_get_thread_num () + 11;
}
if (l || thr.t != 11)
abort ();
#pragma omp parallel reduction (||:l)
l = thr.t != omp_get_thread_num () + 11;
if (l)
abort ();
return 0;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*9 + q*9;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i+1 < outh; i+=2)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
}
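// Note (sketch): this is a direct 3x3 stride-1 convolution. The outer loop
// computes two output rows per iteration so input rows r1 and r2 are shared
// between the `sum' and `sum2' accumulations; the trailing single-row loop
// handles an odd output height. Despite the _sse suffix, this reference path
// is scalar C++.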
|
if97_lib_test.c | // Copyright Martin Lord 2014-2015.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/* *****************************************************************************
* A SHORT PROGRAMME TO CHECK THE LIBRARY
* *******************************************************************************/
#include "if97_lib_test.h"
#include "if97_lib.h"
#include "IF97_common.h"
#include "iapws_surftens.h"
#include "solve_test.h"
#include "IF97_Region1_test.h"
#include "IF97_Region2_test.h"
#include "IF97_Region3_test.h"
#include "IF97_Region4_test.h"
#include "IF97_B23_test.h"
#include <stdio.h>
#include <math.h> // for fabs
#include "winsteam_compatibility.h"
int main (int argc, char **argv)
{
int intermediateResult = TEST_FAIL;
FILE *pTestLog; // Create a pointer to a file; the FILE type is defined in <stdio.h>
pTestLog = fopen(TESTLOGLOC, "w"); // open an empty file to write test results to. pTestLog is a pointer to it
// check threading using OpenMP test
fprintf ( pTestLog, "\n\n************************************************************ \n" );
fprintf ( pTestLog, "*** CHECK FOR MULTITHREADING ***\n\n" );
#pragma omp parallel
{
fprintf ( pTestLog, "thread\n");
}
// *** test region 1 module ***
intermediateResult = if97_region1_test (pTestLog);
resultSummary ("IF97_Region1 module", pTestLog, intermediateResult);
// *** test region 2 module ***
intermediateResult = if97_region2_test (pTestLog);
resultSummary ("IF97_Region2 module", pTestLog, intermediateResult);
// *** test region 2-3 boundary line equation ***
intermediateResult = if97_B23_test (pTestLog);
resultSummary ("IF97_B23 module", pTestLog, intermediateResult);
// *** test region 3 module ***
intermediateResult = if97_region3_test (pTestLog);
resultSummary ("IF97_Region3 module", pTestLog, intermediateResult);
// *** test region 4 module ***
intermediateResult = if97_region4_test (pTestLog);
resultSummary ("IF97_Region4 module", pTestLog, intermediateResult);
// *** test region 5 module ***
intermediateResult = if97_region5_test (pTestLog);
resultSummary ("IF97_Region5 module", pTestLog, intermediateResult);
//**** test the solver *****
//intermediateResult = solve_test (pTestLog);
//resultSummary ("solver module", pTestLog, intermediateResult);
// *** Testing iapws_surftens ******
intermediateResult = if97_surftens_test (pTestLog);
resultSummary ("iapws_surftens", pTestLog, intermediateResult);
// *** test library module ***
intermediateResult = if97_lib_test (pTestLog);
resultSummary ("IF97_lib library", pTestLog, intermediateResult);
fclose (pTestLog); //close the test log file. note result = EOF on error
printf ("Test log results can be found in "); printf( TESTLOGLOC );
return 0;
}
// test a double input function. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is set to true
int testSingleInput ( double (*func) (double), double input, double expectedOutput, double tol, int tolType, char* funcName, FILE *logFile)
{
int testResult = TEST_FAIL; // initialize as a fail.
double error;
switch (tolType) {
case SIG_FIG:
error = fabs(((*func)(input) - expectedOutput)/expectedOutput);
if ( error <= pow(10,-tol )){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g) = %.8g \t Expected: %.8g \t Error ratio : %e \t PASS\n", funcName, input, (*func)(input), expectedOutput, error );
}
else {
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g ) = %.8g \t Expected: %.8g \t Error ratio : %e \t FAIL\n", funcName, input, (*func)(input), expectedOutput, error );
}
break;
case ABS:
error = fabs((*func)(input) - expectedOutput);
if ( error <= tol ){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t PASS\n", funcName, input, (*func)(input), expectedOutput, error );
}
else {
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t FAIL\n", funcName, input, (*func)(input), expectedOutput, error );
}
break;
case PERCENT:
error = fabs(((*func)(input) - expectedOutput)/expectedOutput)*100;
if ( error <= tol ){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t PASS\n", funcName, input, (*func)(input), expectedOutput, error );
}
else {
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t FAIL\n", funcName, input, (*func)(input), expectedOutput, error );
}
break;
}
return testResult;
}
// test a double input function. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is true
int testDoubleInput ( double (*func) (double, double), double input1, double input2, double expectedOutput, double tol, int tolType, char* funcName, FILE *logFile)
{
int testResult = TEST_FAIL; // initialize as a fail.
double error;
switch (tolType) {
case SIG_FIG:
error = fabs(((*func)(input1, input2) - expectedOutput)/expectedOutput);
if ( error <= pow(10,-tol )){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error ratio : %e \t PASS\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput, error );
}
else {
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error ratio : %e \t FAIL\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput, error );
}
break;
case ABS:
error = fabs((*func)(input1, input2) - expectedOutput);
if ( error <= tol ){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t PASS\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput, error );
}
else {
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t FAIL\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput, error );
}
break;
case PERCENT:
error = fabs(((*func)(input1, input2) - expectedOutput)/expectedOutput)*100;
if ( error <= tol ){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t PASS\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput, error );
}
else {
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t FAIL\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput, error );
}
break;
}
return testResult;
}
// test a double input function with boolean output. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is set to true
int testBoolDoubleInput ( bool (*func) (double, double), double input1, double input2, bool expectedOutput, char* funcName, FILE *logFile){
int testResult = TEST_FAIL; // initialize as a fail.
if ((*func)(input1, input2) == expectedOutput){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %s \t Expected: %s \t \t PASS\n", funcName, input1, input2, (*func)(input1, input2) ? "true" : "false", expectedOutput ? "true" : "false");
}
else{
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %s \t Expected: %s \t \t FAIL\n", funcName, input1, input2, (*func)(input1, input2) ? "true" : "false", expectedOutput ? "true" : "false");
}
return testResult;
}
// test a double input function with boolean output. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is set to true
int testCharDoubleInput ( char (*func) (double, double), double input1, double input2, char expectedOutput, char* funcName, FILE *logFile){
int testResult = TEST_FAIL; // initialize as a fail.
if ((*func)(input1, input2) == expectedOutput){
testResult = TEST_PASS;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %c \t Expected: %c \t \t PASS\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput);
}
else{
testResult = testResult | TEST_INCORRECT;
if (VERBOSE_TEST)
fprintf ( logFile, "%s ( %.8g, %.8g ) = %c \t Expected: %c \t \t FAIL\n", funcName, input1, input2, (*func)(input1, input2), expectedOutput);
}
return testResult;
}
// prints a unit test summary to the log based on the test code
void resultSummary (char* funcName, FILE *logFile, int testCode)
{
fprintf (logFile, "\nResults summary for %s : Error code %i \n", funcName, testCode);
if (testCode == TEST_PASS)
fprintf(logFile, "PASS");
else
{
if (testCode & TEST_FAIL)
fprintf(logFile, "FAIL\n");
if (testCode & TEST_INCORRECT)
fprintf(logFile, "Some or all elements generate incorrect results\n");
if (testCode & TEST_INCOMPLETE)
fprintf(logFile, "Some or all elements are not yet complete\n");
}
fprintf (logFile, "\n\n");
}
// *** Testing iapws_surftens ******
int if97_surftens_test (FILE *logFile){
const double If97_surf_acc = 0.07; // Highest difference in table 1 of IAPWS R1-76(2014)
int intermediateResult = TEST_PASS;
fprintf ( logFile, "\n\n *** Testing iapws_surftens *** \n\n" );
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 0.01, 75.65, If97_surf_acc, ABS, "iapws_surftens", logFile);
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 20.0, 72.74, If97_surf_acc, ABS, "iapws_surftens", logFile);
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 100.0, 58.91, If97_surf_acc, ABS, "iapws_surftens", logFile);
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 200.0, 37.67, If97_surf_acc, ABS, "iapws_surftens", logFile);
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 300.0, 14.36, If97_surf_acc, ABS, "iapws_surftens", logFile);
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 370.0, 0.39, If97_surf_acc, ABS, "iapws_surftens", logFile);
intermediateResult = intermediateResult | testSingleInput (iapws_surftens, IF97_TC - IF97_T_TRIP, 0.0, If97_surf_acc, ABS, "iapws_surftens", logFile);
if (intermediateResult != 0)
intermediateResult= intermediateResult | TEST_FAIL;
return intermediateResult;
}
int if97_lib_test (FILE *logFile){
int intermediateResult;
// *** Testing if97_pt_h ******
intermediateResult = TEST_PASS;
fprintf ( logFile, "\n\n *** Testing if97_pt_h *** \n\n" );
intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, 3.0, 300.0, 1.15331273e02, TEST_ACCURACY, SIG_FIG, "if97_pt_h", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, 80.0, 300.0, 1.84142828e02, TEST_ACCURACY, SIG_FIG,"if97_pt_h", logFile);
fprintf ( logFile, "\n1/if97_R3bw_v_pt(%.9g, %.9g) = %.9g", IF97_PC + 0.000001, IF97_TC + 0.000001, 1/if97_R3bw_v_pt(IF97_PC + 0.000001, IF97_TC + 0.000001));
fprintf ( logFile, "\n1/if97_pt_v(%.9g, %.9g) = %.9g\n", IF97_PC + 0.000001, IF97_TC + 0.000001, 1/if97_pt_v(IF97_PC + 0.000001, IF97_TC + 0.000001));
intermediateResult = intermediateResult | testDoubleInput (if97_pt_v, IF97_PC + 0.000001, IF97_TC + 0.000001, 1/324.23752306, TEST_ACCURACY, SIG_FIG,"if97_pt_v", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, IF97_PC + 0.000001, IF97_TC + 0.000001, 2083.817978619541, TEST_ACCURACY, SIG_FIG,"if97_pt_h", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, 30.0, 1500.0, 5.16723514e03,TEST_ACCURACY, SIG_FIG,"if97_pt_h", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_u, IF97_PC + 0.000001, IF97_TC + 0.000001, 2015.769096450988, TEST_ACCURACY, SIG_FIG,"if97_pt_u", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_s, IF97_PC + 0.000001, IF97_TC + 0.000001, 4.406259014859, TEST_ACCURACY, SIG_FIG,"if97_pt_s", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_Cv, IF97_PC + 0.000001, IF97_TC + 0.000001, 4.527598110108, TEST_ACCURACY, SIG_FIG,"if97_pt_Cv", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_Cp, IF97_PC + 0.000001, IF97_TC + 0.000001, 4.54022408404e5, TEST_ACCURACY, SIG_FIG,"if97_pt_Cp", logFile);
intermediateResult = intermediateResult | testDoubleInput (if97_pt_Vs, IF97_PC + 0.000001, IF97_TC + 0.000001, 314.252078309417, TEST_ACCURACY, SIG_FIG,"if97_pt_Vs", logFile);
resultSummary ("if97_pt_h", logFile, intermediateResult);
if (intermediateResult != 0)
intermediateResult = intermediateResult | TEST_FAIL;
return intermediateResult;
} |
GB_binop__eq_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_fp32
// A.*B function (eWiseMult): GB_AemultB__eq_fp32
// A*D function (colscale): GB_AxD__eq_fp32
// D*A function (rowscale): GB_DxB__eq_fp32
// C+=B function (dense accum): GB_Cdense_accumB__eq_fp32
// C+=b function (dense accum): GB_Cdense_accumb__eq_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_fp32
// C=scalar+B GB_bind1st__eq_fp32
// C=scalar+B' GB_bind1st_tran__eq_fp32
// C=A+scalar GB_bind2nd__eq_fp32
// C=A'+scalar GB_bind2nd_tran__eq_fp32
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__eq_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__eq_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__eq_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__eq_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__eq_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB103-master-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A master directive is used to protect memory accesses: only the master
thread executes the enclosed block, so the write to k does not race.
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
int main()
{
omprace_init();
int k;
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
omprace_fini();
return 0;
}
|
sirius.h | // Copyright (c) 2013-2019 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file sirius.h
*
* \brief "All-in-one" include file.
*/
#ifndef __SIRIUS_H__
#define __SIRIUS_H__
#if defined(__APEX)
#include <apex_api.hpp>
#endif
#include "utils/cmd_args.hpp"
#include "utils/json.hpp"
using json = nlohmann::json;
#include "input.hpp"
#include "radial_solver.hpp"
#include "SHT/sht.hpp"
#include "SHT/gaunt.hpp"
#include "sddk.hpp"
#include "hdf5_tree.hpp"
#include "Band/band.hpp"
#include "dft_ground_state.hpp"
#if defined(__PLASMA)
extern "C" void plasma_init(int num_cores);
#endif
#if defined(__LIBSCI_ACC)
extern "C" void libsci_acc_init();
extern "C" void libsci_acc_finalize();
#endif
/// Namespace of the SIRIUS library.
namespace sirius {
json sirius_options_parser_;
/// Return the status of the library (initialized or not).
inline static bool& is_initialized()
{
static bool b{false};
return b;
}
/// Initialize the library.
inline void initialize(bool call_mpi_init__ = true)
{
if (is_initialized()) {
TERMINATE("SIRIUS library is already initialized");
}
if (call_mpi_init__) {
Communicator::initialize(MPI_THREAD_MULTIPLE);
}
if (Communicator::world().rank() == 0) {
printf("SIRIUS %i.%i.%i, git hash: %s\n", major_version, minor_version, revision, git_hash);
#if !defined(NDEBUG)
printf("Warning! Compiled in 'debug' mode with assert statemsnts enabled!\n");
#endif
}
/* get number of ranks per node during the global call to sirius::initialize() */
sddk::num_ranks_per_node();
if (acc::num_devices() > 0) {
int devid = sddk::get_device_id(acc::num_devices());
#pragma omp parallel
{
#pragma omp critical
acc::set_device_id(devid);
}
acc::create_streams(omp_get_max_threads() + 1);
#if defined(__GPU)
gpublas::create_stream_handles();
#endif
#if defined(__CUDA)
cublas::xt::create_handle();
cusolver::create_handle();
#endif
}
#if defined(__APEX)
apex::init("sirius", Communicator::world().rank(), Communicator::world().size());
#endif
utils::start_global_timer();
#if defined(__MAGMA)
magma::init();
#endif
#if defined(__PLASMA)
plasma_init(omp_get_max_threads());
#endif
#if defined(__LIBSCI_ACC)
libsci_acc_init();
#endif
/* for the fortran interface to blas/lapack */
assert(sizeof(int) == 4);
assert(sizeof(double) == 8);
is_initialized() = true;
}
/// Shut down the library.
inline void finalize(bool call_mpi_fin__ = true, bool reset_device__ = true, bool fftw_cleanup__ = true)
{
if (!is_initialized()) {
TERMINATE("SIRIUS library was not initialized");
}
#if defined(__MAGMA)
magma::finalize();
#endif
#if defined(__LIBSCI_ACC)
libsci_acc_finalize();
#endif
if (acc::num_devices()) {
//acc::set_device();
#if defined(__GPU)
gpublas::destroy_stream_handles();
#endif
#if defined(__CUDA)
cusolver::destroy_handle();
cublas::xt::destroy_handle();
#endif
acc::destroy_streams();
if (reset_device__) {
acc::reset();
}
}
if (fftw_cleanup__) {
fftw_cleanup();
}
utils::stop_global_timer();
#if defined(__APEX)
apex::finalize();
#endif
if (call_mpi_fin__) {
Communicator::finalize();
}
is_initialized() = false;
}
}
#endif // __SIRIUS_H__
//SIRIUS is a domain-specific library for electronic structure calculations. It supports full-potential linearized
//augmented plane wave (FP-LAPW) and pseudopotential plane wave (PP-PW) methods and is designed to work with codes
//such as Exciting, Elk and Quantum ESPRESSO.
/**
\mainpage Welcome to SIRIUS
*/
//! \page stdvarname Standard variable names
//!
//! Below is the list of standard names for some of the loop variables:
//!
//! l - index of orbital quantum number \n
//! m - index of azimuthal quantum number \n
//! lm - combined index of (l,m) quantum numbers \n
//! ia - index of atom \n
//! ic - index of atom class \n
//! iat - index of atom type \n
//! ir - index of r-point \n
//! ig - index of G-vector \n
//! idxlo - index of local orbital \n
//! idxrf - index of radial function \n
//! xi - combined index of lm and idxrf (product of angular and radial functions) \n
//! ik - index of k-point \n
//! itp - index of (theta, phi) spherical angles \n
//!
//! The _loc suffix is often added to the variables to indicate that they represent the local fraction of the elements
//! assigned to the given MPI rank.
//!
//! \page coding Coding style
//!
//! Below are some basic style rules that we follow:
//! - Page width is approximately 120 characters. Screens are wide nowadays and 80 characters is an
//! obsolete restriction. Going slightly over 120 characters is allowed if it is required for line continuity.
//! - Indentation: 4 spaces (no tabs)
//! - Comments are inserted before the code with slash-star style starting with the lower case:
//! \code{.cpp}
//! /* call a very important function */
//! do_something();
//! \endcode
//! - Spaces between most operators:
//! \code{.cpp}
//! if (i < 5) {
//! j = 5;
//! }
//!
//! for (int k = 0; k < 3; k++)
//!
//! int lm = l * l + l + m;
//!
//! double d = std::abs(e);
//!
//! int k = idx[3];
//! \endcode
//! - Spaces between function arguments:
//! \code{.cpp}
//! double d = some_func(a, b, c);
//! \endcode
//! but not
//! \code{.cpp}
//! double d=some_func(a,b,c);
//! \endcode
//! or
//! \code{.cpp}
//! double d = some_func( a, b, c );
//! \endcode
//! - Spaces between template arguments, but not between <> brackets:
//! \code{.cpp}
//! std::vector<std::array<int, 2>> vec;
//! \endcode
//! but not
//! \code{.cpp}
//! std::vector< std::array< int, 2 > > vec;
//! \endcode
//! - Curly braces for classes and functions start from a new line:
//! \code{.cpp}
//! class A
//! {
//! ....
//! };
//!
//! inline int num_points()
//! {
//! return num_points_;
//! }
//! \endcode
//! - Curly braces for if-statements, for-loops, switch-case statements, etc. start at the end of the line:
//! \code{.cpp}
//! for (int i: {0, 1, 2}) {
//! some_func(i);
//! }
//!
//! if (a == 0) {
//! printf("a is zero");
//! } else {
//! printf("a is not zero");
//! }
//!
//! switch (i) {
//!     case 1: {
//!         do_something();
//!         break;
//!     }
//!     case 2: {
//!         do_something_else();
//!         break;
//!     }
//! }
//! \endcode
//! - Even single line 'if' statements and 'for' loops must have the curly braces:
//! \code{.cpp}
//! if (i == 4) {
//! some_variable = 5;
//! }
//!
//! for (int k = 0; k < 10; k++) {
//! do_something(k);
//! }
//! \endcode
//! - Reference and pointer symbols are part of type:
//! \code{.cpp}
//! std::vector<double>& vec = make_vector();
//!
//! double* ptr = &vec[0];
//!
//! auto& atom = unit_cell().atom(ia);
//! \endcode
//! - Const modifier follows the type declaration:
//! \code{.cpp}
//! std::vector<int> const& idx() const
//! {
//! return idx_;
//! }
//! \endcode
//! - Names of class members end with underscore:
//! \code{.cpp}
//! class A
//! {
//! private:
//! int lmax_;
//! };
//! \endcode
//! - Setter method starts from set_, getter method is a variable name itself:
//! \code{.cpp}
//! class A
//! {
//! private:
//! int lmax_;
//! public:
//! int lmax() const
//! {
//! return lmax_;
//! }
//! void set_lmax(int lmax__)
//! {
//! lmax_ = lmax__;
//! }
//! };
//! \endcode
//! However, the new style for setter methods is preferable:
//! \code{.cpp}
//! class A
//! {
//! private:
//! int lmax_;
//! public:
//! int lmax() const
//! {
//! return lmax_;
//! }
//! int lmax(int lmax__)
//! {
//! lmax_ = lmax__;
//! return lmax_;
//! }
//! };
//! \endcode
//! - Single-line functions should not be flattened:
//! \code{.cpp}
//! struct A
//! {
//! int lmax() const
//! {
//! return lmax_;
//! }
//! };
//! \endcode
//! but not
//! \code{.cpp}
//! struct A
//! {
//! int lmax() const { return lmax_; }
//! };
//! \endcode
//! - Header guards have a standard name: double underscore + file name in capital letters + double underscore
//! \code{.cpp}
//! #ifndef __HEADER_HPP__
//! #define __HEADER_HPP__
//! ...
//! #endif // __HEADER_HPP__
//! \endcode
//! - Variable names are all in lowercase and underscore-separated (aka 'snake_case'):
//! \code{.cpp}
//! int num_bands;
//! std::complex<double> beta_psi;
//! \endcode
//! but not
//! \code{.cpp}
//! int NumBands;
//! /* or */
//! std::complex<double> BetaPsi;
//! /* or */
//! std::complex<double> Another_BetaPsi;
//! \endcode
//! - Order of class members: private, protected, public
//! \code{.cpp}
//! class A
//! {
//! private:
//! int lmax_;
//! void bar();
//! protected:
//! void foo();
//! public:
//! int lmax() const
//! {
//! return lmax_;
//! }
//! };
//! \endcode
//!
//! We use clang-format utility to enforce the basic formatting style. Please have a look at .clang-format config file
//! in the source root folder for the definitions and use helper script 'clang_format.x'.
//!
//! Class naming convention.
//!
//! Problem: none of the 'standard' naming conventions is satisfactory. For example, we have a class
//! which does a DFT ground state. Following the common naming conventions it could be named like this:
//! DFTGroundState, DftGroundState, dft_ground_state. The last two are bad, because DFT (and not Dft or dft)
//! is a well recognized abbreviation. The first one is bad because the capital G attaches to DFT and we
//! automatically read it as "DFTG round state".
//!
//! Solution: we can propose the following: DFTgroundState or DFT_ground_state. The first variant still
//! doesn't look very good because one of the words (State) is capitalized and one (ground) is not. So we pick
//! the second variant: DFT_ground_state (by the way, this is close to Bjarne Stroustrup's naming convention,
//! where he uses a first capital letter and underscores, for example class Io_obj).
//!
//! Some other examples:
//! - class Ground_state (composed of two words)
//! - class FFT_interface (composed of an abbreviation and a word)
//! - class Interface_XC (composed of a word and abbreviation)
//! - class Spline (single word)
//!
//! Exceptions are allowed if it makes sense. For example, low level utility classes like 'mdarray' (multi-dimensional
//! array) or 'pstdout' (parallel standard output) are named with small letters.
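//!
//! For illustration, the convention yields declarations like these (sketch only, names taken from the list above):
//! \code{.cpp}
//! class DFT_ground_state;   // abbreviation + words
//! class FFT_interface;      // abbreviation + word
//! class Interface_XC;       // word + abbreviation
//! class Spline;             // single word
//! class mdarray;            // low-level utility class, lowercase by exception
//! \endcode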
//!
/** \page fderiv Functional derivatives
Definition:
\f[
\frac{dF[f+\epsilon \eta ]}{d \epsilon}\Bigg\rvert_{\epsilon = 0} := \int \frac{\delta F[f]}{\delta f(x')} \eta(x') dx'
\f]
Alternative definition is:
\f[
\frac{\delta F[f(x)]}{\delta f(x')} = \lim_{\epsilon \to 0} \frac{F[f(x) + \epsilon \delta(x-x')] - F[f(x)]}{\epsilon}
\f]
*/
|
GB_binop__rdiv_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp64)
// A*D function (colscale): GB (_AxD__rdiv_fp64)
// D*A function (rowscale): GB (_DxB__rdiv_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp64)
// C=scalar+B GB (_bind1st__rdiv_fp64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fp64)
// C=A+scalar GB (_bind2nd__rdiv_fp64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (bij / aij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y / x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FP64 || GxB_NO_RDIV_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rdiv_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (bij / x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (y / aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij / x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (y / aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
utils.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifdef _WIN32
#include <filesystem>
#else
#include <dirent.h>
#include <sys/types.h>
#endif
namespace PaddleSolution {
namespace utils {
inline std::string path_join(const std::string& dir,
const std::string& path) {
std::string separator = "/";
#ifdef _WIN32
separator = "\\";
#endif
return dir + separator + path;
}
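// Usage sketch: path_join("models", "unet.param") yields "models/unet.param"
// on POSIX systems and "models\\unet.param" when _WIN32 is defined.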
#ifndef _WIN32
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(
const std::string& path, const std::string& exts) {
std::vector<std::string> imgs;
struct dirent *entry;
DIR *dir = opendir(path.c_str());
if (dir == NULL) {
return imgs;
}
while ((entry = readdir(dir)) != NULL) {
std::string item = entry->d_name;
auto ext = strrchr(entry->d_name, '.');
if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
continue;
}
if (exts.find(ext) != std::string::npos) {
imgs.push_back(path_join(path, entry->d_name));
}
}
closedir(dir);
return imgs;
}
#else
// scan a directory and get all files with input extensions
inline std::vector<std::string> get_directory_images(
const std::string& path, const std::string& exts) {
std::vector<std::string> imgs;
for (const auto& item :
std::filesystem::directory_iterator(path)) {
auto suffix = item.path().extension().string();
if (exts.find(suffix) != std::string::npos && suffix.size() > 0) {
imgs.push_back(item.path().string());
}
}
return imgs;
}
#endif
// normalize and HWC_BGR -> CHW_RGB
inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
std::vector<float>& fstd) {
int rh = im.rows;
int rw = im.cols;
int rc = im.channels();
double normf = static_cast<double>(1.0) / 255.0;
#pragma omp parallel for
for (int h = 0; h < rh; ++h) {
const uchar* ptr = im.ptr<uchar>(h);
int im_index = 0;
for (int w = 0; w < rw; ++w) {
for (int c = 0; c < rc; ++c) {
int top_index = (c * rh + h) * rw + w;
float pixel = static_cast<float>(ptr[im_index++]);
pixel = (pixel * normf - fmean[c]) / fstd[c];
data[top_index] = pixel;
}
}
}
}
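// Usage sketch (illustrative only; the file name and the mean/std values
// below are assumptions, not part of this library):
//   cv::Mat im = cv::imread("input.jpg", cv::IMREAD_COLOR);  // HWC layout
//   std::vector<float> fmean = {0.485f, 0.456f, 0.406f};
//   std::vector<float> fstd  = {0.229f, 0.224f, 0.225f};
//   std::vector<float> chw(im.channels() * im.rows * im.cols);
//   normalize(im, chw.data(), fmean, fstd);  // fills chw with CHW planes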
// argmax
inline void argmax(float* out, std::vector<int>& shape,
std::vector<uchar>& mask, std::vector<uchar>& scoremap) {
int out_img_len = shape[1] * shape[2];
int blob_out_len = out_img_len * shape[0];
/*
Eigen::TensorMap<Eigen::Tensor<float, 3>> out_3d(out, shape[0], shape[1], shape[2]);
Eigen::Tensor<Eigen::DenseIndex, 2> argmax = out_3d.argmax(0);
*/
// Each pixel's channel-argmax is independent, so parallelize over pixels;
// max_value and label are per-iteration locals so threads never share them.
#pragma omp parallel for
for (int i = 0; i < out_img_len; ++i) {
float max_value = -1;
int label = 0;
for (int j = 0; j < shape[0]; ++j) {
int index = i + j * out_img_len;
if (index >= blob_out_len) {
continue;
}
float value = out[index];
if (value > max_value) {
max_value = value;
label = j;
}
}
if (label == 0) max_value = 0;
mask[i] = uchar(label);
scoremap[i] = uchar(max_value * 255);
}
}
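// Usage sketch (the shape values are assumptions): for a CHW score tensor
// with shape = {num_classes, height, width}, mask receives the per-pixel
// argmax label and scoremap the winning score scaled to 0..255:
//   std::vector<int> shape = {num_classes, height, width};
//   std::vector<uchar> mask(height * width), scoremap(height * width);
//   argmax(out_data, shape, mask, scoremap);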
} // namespace utils
} // namespace PaddleSolution
|
queue.h | // -*- C++ -*-
// Copyright (C) 2007-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/queue.h
* @brief Lock-free double-ended queue.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUEUE_H
#define _GLIBCXX_PARALLEL_QUEUE_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#include <parallel/compatibility.h>
/** @brief Decide whether to declare certain variable volatile in this file. */
#define _GLIBCXX_VOLATILE volatile
namespace __gnu_parallel
{
/**@brief Double-ended queue of bounded size, allowing lock-free
* atomic access. push_front() and pop_front() must not be called
* concurrently with each other, while pop_back() can be called
* concurrently at all times.
* @c empty(), @c size(), and @c top() are intentionally not provided.
* Calling them would not make sense in a concurrent setting.
* @param _Tp Contained element type. */
template<typename _Tp>
class _RestrictedBoundedConcurrentQueue
{
private:
/** @brief Array of elements, seen as cyclic buffer. */
_Tp* _M_base;
/** @brief Maximal number of elements contained at the same time. */
_SequenceIndex _M_max_size;
/** @brief Cyclic __begin and __end pointers contained in one
atomically changeable value. */
_GLIBCXX_VOLATILE _CASable _M_borders;
public:
/** @brief Constructor. Not to be called concurrently, of course.
* @param __max_size Maximal number of elements to be contained. */
_RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size)
{
_M_max_size = __max_size;
_M_base = new _Tp[__max_size];
_M_borders = __encode2(0, 0);
#pragma omp flush
}
/** @brief Destructor. Not to be called concurrently, of course. */
~_RestrictedBoundedConcurrentQueue()
{ delete[] _M_base; }
/** @brief Pushes one element into the queue at the front end.
* Must not be called concurrently with pop_front(). */
void
push_front(const _Tp& __t)
{
_CASable __former_borders = _M_borders;
int __former_front, __former_back;
__decode2(__former_borders, __former_front, __former_back);
*(_M_base + __former_front % _M_max_size) = __t;
#if _GLIBCXX_PARALLEL_ASSERTIONS
// Otherwise: front - back > _M_max_size eventually.
_GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back)
<= _M_max_size);
#endif
__fetch_and_add(&_M_borders, __encode2(1, 0));
}
/** @brief Pops one element from the queue at the front end.
* Must not be called concurrently with push_front(). */
bool
pop_front(_Tp& __t)
{
int __former_front, __former_back;
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
while (__former_front > __former_back)
{
// Chance.
_CASable __former_borders = __encode2(__former_front,
__former_back);
_CASable __new_borders = __encode2(__former_front - 1,
__former_back);
if (__compare_and_swap(&_M_borders, __former_borders,
__new_borders))
{
__t = *(_M_base + (__former_front - 1) % _M_max_size);
return true;
}
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
}
return false;
}
/** @brief Pops one element from the queue at the back end.
* May be called concurrently with push_front() and pop_front(). */
bool
pop_back(_Tp& __t) //queue behavior
{
int __former_front, __former_back;
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
while (__former_front > __former_back)
{
// Chance.
_CASable __former_borders = __encode2(__former_front,
__former_back);
_CASable __new_borders = __encode2(__former_front,
__former_back + 1);
if (__compare_and_swap(&_M_borders, __former_borders,
__new_borders))
{
__t = *(_M_base + __former_back % _M_max_size);
return true;
}
#pragma omp flush
__decode2(_M_borders, __former_front, __former_back);
}
return false;
}
};
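/** Minimal usage sketch (illustrative only, not part of the library API):
 *  one owner thread calls push_front() and pop_front(), while other
 *  threads may steal elements concurrently via pop_back().
 *  @code
 *  _RestrictedBoundedConcurrentQueue<int> __queue(128);
 *  __queue.push_front(42);            // owner thread only
 *  int __value;
 *  if (__queue.pop_back(__value))     // safe to call from any thread
 *    __use(__value);                  // __use() is a placeholder
 *  @endcode */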
} //namespace __gnu_parallel
#undef _GLIBCXX_VOLATILE
#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
|
imm.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_MPI_IMM_H
#define RIPPLES_MPI_IMM_H
#include "mpi.h"
#include <cstddef>
#include <utility>
#include <vector>
#include "trng/lcg64.hpp"
#include "ripples/generate_rrr_sets.h"
#include "ripples/imm.h"
#include "ripples/imm_execution_record.h"
#include "ripples/mpi/find_most_influential.h"
#include "ripples/utility.h"
namespace ripples {
namespace mpi {
template <typename ex_tag>
struct MPI_Plus_X {
// using generate_ex_tag
// using seed_selection_ex_tag
};
template <>
struct MPI_Plus_X<mpi_omp_parallel_tag> {
using generate_ex_tag = omp_parallel_tag;
using seed_selection_ex_tag = mpi_omp_parallel_tag;
};
//! Compute ThetaPrime for the MPI implementation.
//!
//! \param x The index of the current iteration.
//! \param epsilonPrime Parameter controlling the approximation factor.
//! \param l Parameter usually set to 1.
//! \param k The size of the seed set.
//! \param num_nodes The number of nodes in the input graph.
inline size_t ThetaPrime(ssize_t x, double epsilonPrime, double l, size_t k,
size_t num_nodes, mpi_omp_parallel_tag &&) {
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
return (ThetaPrime(x, epsilonPrime, l, k, num_nodes, omp_parallel_tag{}) /
world_size) +
1;
}
//! Split a random number generator into one sequence per MPI rank.
//!
//! \tparam PRNG The type of the random number generator.
//!
//! \param gen The parallel random number generator to split.
template <typename PRNG>
void split_generator(PRNG &gen) {
// Find out rank, size
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
gen.split(world_size, world_rank);
}
template <typename PRNG>
std::vector<PRNG> rank_split_generator(const PRNG &gen) {
size_t max_num_threads(1);
#pragma omp single
max_num_threads = omp_get_max_threads();
std::vector<trng::lcg64> generator(max_num_threads, gen);
#pragma omp parallel
{
generator[omp_get_thread_num()].split(omp_get_num_threads(),
omp_get_thread_num());
}
return generator;
}
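// Usage sketch (illustrative; assumes MPI has been initialized and uses the
// trng::lcg64 generator already referenced in this header):
//   trng::lcg64 gen;
//   gen.seed(2023ULL);
//   ripples::mpi::split_generator(gen);  // one subsequence per MPI rank
//   auto thread_gens = ripples::mpi::rank_split_generator(gen);
//   // thread_gens[t] is the subsequence for OpenMP thread t on this rank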
//! Collect a set of Random Reverse Reachable sets.
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam PRNGeneratorTy The type of the parallel random number generator.
//! \tparam diff_model_tag Type-Tag to select the diffusion model.
//!
//! \param G The input graph. The graph is transposed.
//! \param k The size of the seed set.
//! \param epsilon The parameter controlling the approximation guarantee.
//! \param l Parameter usually set to 1.
//! \param generator The parallel random number generator.
//! \param record Data structure storing timing and event counts.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The execution policy tag.
template <typename GraphTy, typename ConfTy, typename PRNGeneratorTy,
typename diff_model_tag, typename ExTagTrait>
auto Sampling(const GraphTy &G, const ConfTy &CFG, double l,
PRNGeneratorTy &generator, IMMExecutionRecord &record,
diff_model_tag &&model_tag, ExTagTrait &&) {
using vertex_type = typename GraphTy::vertex_type;
size_t k = CFG.k;
double epsilon = CFG.epsilon;
// sqrt(2) * epsilon
double epsilonPrime = 1.4142135623730951 * epsilon;
double LB = 0;
#ifdef ENABLE_MEMKIND
RRRsetAllocator<vertex_type> allocator("/pmem1", 0);
#else
RRRsetAllocator<vertex_type> allocator;
#endif
std::vector<RRRset<GraphTy>> RR;
auto start = std::chrono::high_resolution_clock::now();
for (ssize_t x = 1; x < std::log2(G.num_nodes()); ++x) {
// Equation 9
ssize_t thetaPrime = ThetaPrime(x, epsilonPrime, l, k, G.num_nodes(),
mpi_omp_parallel_tag{});
size_t delta = thetaPrime - RR.size();
record.ThetaPrimeDeltas.push_back(delta);
auto timeRRRSets = measure<>::exec_time([&]() {
RR.insert(RR.end(), delta, RRRset<GraphTy>(allocator));
auto begin = RR.end() - delta;
GenerateRRRSets(G, generator, begin, RR.end(), record,
std::forward<diff_model_tag>(model_tag),
typename ExTagTrait::generate_ex_tag{});
});
record.ThetaEstimationGenerateRRR.push_back(timeRRRSets);
double f;
auto timeMostInfluential = measure<>::exec_time([&]() {
const auto &S =
FindMostInfluentialSet(G, CFG, RR, generator.isGpuEnabled(),
typename ExTagTrait::seed_selection_ex_tag{});
f = S.first;
});
record.ThetaEstimationMostInfluential.push_back(timeMostInfluential);
if (f >= std::pow(2, -x)) {
// std::cout << "Fraction " << f << std::endl;
LB = (G.num_nodes() * f) / (1 + epsilonPrime);
break;
}
}
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
size_t theta = Theta(epsilon, l, k, LB, G.num_nodes());
size_t thetaLocal = (theta / world_size) + 1;
auto end = std::chrono::high_resolution_clock::now();
record.ThetaEstimationTotal = end - start;
record.Theta = theta;
start = std::chrono::high_resolution_clock::now();
if (thetaLocal > RR.size()) {
size_t final_delta = thetaLocal - RR.size();
RR.insert(RR.end(), final_delta, RRRset<GraphTy>(allocator));
auto begin = RR.end() - final_delta;
GenerateRRRSets(G, generator, begin, RR.end(), record,
std::forward<diff_model_tag>(model_tag),
typename ExTagTrait::generate_ex_tag{});
}
end = std::chrono::high_resolution_clock::now();
record.GenerateRRRSets = end - start;
return RR;
}
//! The IMM algorithm for Influence Maximization (MPI specialization).
//!
//! \tparam GraphTy The type of the input graph.
//! \tparam PRNG The type of the parallel random number generator.
//! \tparam diff_model_tag Type-Tag to select the diffusion model.
//!
//! \param G The input graph. The graph is transposed.
//! \param k The size of the seed set.
//! \param epsilon The parameter controlling the approximation guarantee.
//! \param l Parameter usually set to 1.
//! \param gen The parallel random number generator.
//! \param model_tag The diffusion model tag.
//! \param ex_tag The execution policy tag.
template <typename GraphTy, typename ConfTy, typename diff_model_tag,
typename GeneratorTy, typename ExTagTrait>
auto IMM(const GraphTy &G, const ConfTy &CFG, double l, GeneratorTy &gen,
IMMExecutionRecord &record, diff_model_tag &&model_tag,
ExTagTrait &&ex_tag) {
using vertex_type = typename GraphTy::vertex_type;
l = l * (1 + 1 / std::log2(G.num_nodes()));
auto R = mpi::Sampling(G, CFG, l, gen, record,
std::forward<diff_model_tag>(model_tag),
std::forward<ExTagTrait>(ex_tag));
auto start = std::chrono::high_resolution_clock::now();
const auto &S =
FindMostInfluentialSet(G, CFG, R, gen.isGpuEnabled(),
typename ExTagTrait::seed_selection_ex_tag{});
auto end = std::chrono::high_resolution_clock::now();
record.FindMostInfluentialSet = end - start;
return S.second;
}
} // namespace mpi
} // namespace ripples
#endif // RIPPLES_MPI_IMM_H
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
template<typename DType>
inline DType sigmoid(DType x) {
return 1.0f / (1.0f + exp(-x));
}
template<typename DType>
inline DType relu(DType x) {
return x > static_cast<DType>(0) ? x : static_cast<DType>(0);
}
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
DType* rs,
bool state_outputs,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const int cell_size = N * H;
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
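// Per step, the fused input projection yx = x * Wx^T and recurrent
// projection yh = h_{t-1} * Wh^T feed the standard LSTM cell equations:
//   i_t = sigmoid(W_i x_t + U_i h_{t-1} + b_i)
//   f_t = sigmoid(W_f x_t + U_f h_{t-1} + b_f)
//   g_t = tanh   (W_g x_t + U_g h_{t-1} + b_g)
//   o_t = sigmoid(W_o x_t + U_o h_{t-1} + b_o)
//   c_t = f_t * c_{t-1} + i_t * g_t
//   h_t = o_t * tanh(c_t)
// The gates are also staged into `ifgo` so the backward pass can reuse them.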
for (int i = 0; i < T; ++i) {
int t = bid ? T - 1 - i : i;
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
h[j][k] = ht;
// stash the step's output, cell state, and gates in the reserve space
// for the backward pass
y[t][j][k + offset] = ht;
c[i][j][k] = ct;
ifgo[i][j][k][0] = it;
ifgo[i][j][k][1] = ft;
ifgo[i][j][k][2] = gt;
ifgo[i][j][k][3] = ot;
if (i == T - 1 && state_outputs) {
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
}
}
}
}
template <typename DType>
void LstmForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
const float dropout) {
DType* dropout_random = rs;
DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int r_size = D * T * N * H * 6;
const int y_offset = T * N * H * 5;
const int cell_size = N * H;
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
int idx = 0;  // index into the hidden & cell state tensors
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < L; ++i) {
const int input_size = i ? H * D : I;
const int w_size = (input_size + H) * H * 4;
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x,
hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x,
hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
}
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
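// Inverted dropout between layers: each activation is zeroed with
// probability `dropout`, survivors are scaled by 1/(1 - dropout) so that
// inference needs no rescaling, and the mask is recorded in
// dropout_random for the backward pass.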
if (dropout > 0.0f) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < T * N * H * D; j++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[i * T * N * H * D + j] = 0;
y.dptr_[j] = 0;
} else {
dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
}
}
}
x_ptr = y.dptr_;
rs2 += r_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = (rs2 + y_offset)[i];
}
}
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
bool state_outputs,
bool bid,
const int T,
const int N,
const int I,
const int H,
const int P,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ? P : H)));
// LSTM projection (P > 0): whr starts as a 1x1 placeholder and is rebound
// to the projection weights below when a projection size is given.
Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1));
if (P > 0) whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1));
if (P > 0) r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P));
const int offset = bid ? H : 0;
const int proj_offset = bid ? P : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const int cell_size = N * H;
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < T; ++i) {
int t = bid ? T - 1 - i : i;
if (P > 0) {
linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true);
} else {
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
}
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
if (P == 0) y[t][j][k + offset] = ht;
if (i == T - 1 && state_outputs) {
if (P == 0) hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
} else {
c[j][k] = ct;
}
h[j][k] = ht;
}
if (P > 0) {
linalg_gemm(h, whr, r, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < N; ++j) {
std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType));
}
}
}
}
template <typename DType>
void LstmForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
const int P,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr) {
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int cell_size = N * H;
const int projection_size = (P ? P : H) * N;
DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
DType* y_cur_ptr = y_ptr;
int idx = 0;  // index into the hidden & cell state tensors
bool flag = (L % 2 == 0);
for (int i = 0; i < L; ++i) {
const int input_size = i ? (P ? P : H) * D : I;
int w_size = (input_size + (P ? P : H)) * H * 4;
if (P > 0) {
w_size += P * H;
}
// If bidirectional, need space to save current layer output y.
if (D == 2) {
y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
flag = !flag;
}
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? P : H) * D));
LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H, P,
x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
// If bidirectional, then calculate the reverse direction's forward result.
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += projection_size;
cy_ptr += cell_size;
}
LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H, P,
x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
}
// Don't need to move pointer in the last layer.
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
x_ptr = y_cur_ptr;
++idx;
if (state_outputs) {
hy_ptr += projection_size;
cy_ptr += cell_size;
}
}
}
}
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
DType* rs,
DType* tmp_buf,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
const Tensor<cpu, 3, DType> &dy,
const Tensor<cpu, 2, DType> &dx,
const Tensor<cpu, 2, DType> &dhx,
const Tensor<cpu, 2, DType> &dcx,
DType* dhy_ptr,
DType* dcy_ptr,
DType* w_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * 4 * H; ++i) {
dwh.dptr_[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 4 * H; ++i) {
dbx.dptr_[i] = 0;
dbh.dptr_[i] = 0;
}
}
Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta0 = 0.0;
const DType beta1 = 1.0;
const DType beta2 = 2.0;
const int cell_size = N * H;
if (dhy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = dhy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = 0;
}
}
if (dcy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = dcy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = 0;
}
}
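// Reverse-time sweep applying the LSTM backward equations; with
// tc = tanh(c_t), the gates stored in `ifgo` give
//   dc_t += dh_t * o_t * (1 - tc^2)
//   di_t  = dc_t * g_t * i_t * (1 - i_t)
//   df_t  = dc_t * c_{t-1} * f_t * (1 - f_t)
//   dg_t  = dc_t * i_t * (1 - g_t^2)
//   do_t  = dh_t * tc * o_t * (1 - o_t)
//   dc_{t-1} = dc_t * f_t
// which is exactly what the loop below writes into difgo.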
for (int i = T - 1; i >= 0; --i) {
int t = bid ? T - 1 - i : i;
int tnext = bid ? t + 1 : t - 1;
const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType tc = tanh(c[i][j][k]);
DType it = ifgo[i][j][k][0];
DType ft = ifgo[i][j][k][1];
DType gt = ifgo[i][j][k][2];
DType ot = ifgo[i][j][k][3];
dh[j][k] += dy[t][j][k + offset];
dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
if (req_statecell != kNullOp || i > 0) {
dcnext[j][k] = dc[j][k] * ft;
}
if (i) {
htmp[j][k] = y[tnext][j][k + offset];
}
}
Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
if (req_state != kNullOp || i > 0) {
linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
}
if (req_params != kNullOp) {
if (req_params != kAddTo) {
linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
} else {
linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
// generate dwx every time step for AddTo
Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
}
}
}
Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
if (req_data != kNullOp) {
linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
}
if (req_params != kNullOp && req_params != kAddTo) {
linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
}
const int row = T * N;
const int col = H * 4;
if (req_params != kNullOp) {
if (req_params != kAddTo) {
for (int i = 0; i < row; ++i) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += dyx[i][j];
dbh[j] = dbx[j];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < col * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
for (int i = 0; i < N; ++i) {
tmp_dbx[j][t] += dyx[t * N + i][j];
tmp_dbh[j][t] = tmp_dbx[j][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += tmp_dbx[j][t] + dbx[j];
dbh[j] += tmp_dbh[j][t] + dbh[j];
}
}
}
}
}
template <typename DType>
void LstmBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dcy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dcx_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell,
const float dropout) {
DType* dropout_random = rs + (L - 1) * D * T * N * H;
DType* rs2 = rs + (L - 1) * D * T * N * H;
DType* tmp_buf = ws;
DType* ws2 = tmp_buf + 8 * T * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int r_size = D * T * N * H * 6;
const int y_offset = T * N * H * 5;
const int w_size1 = (I + H) * H * 4; // first layer
const int w_size2 = (D * H + H) * H * 4; // other layers
const int cell_size = N * H;
DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
for (int i = L - 1; i >= 0; --i) {
const int input_size = i ? H * D : I;
const int w_size = i ? w_size2 : w_size1;
int idx = i * D;
DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
DType* db_cur_ptr = db_ptr + i * b_size * D;
DType* rs_cur_ptr = rs2 + i * r_size;
DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr;
DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr;
Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
req_data, req_params, req_state, req_statecell);
if (D == 2) {
w_cur_ptr += w_size;
dw_cur_ptr += w_size;
db_cur_ptr += b_size;
++idx;
dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr;
dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr;
LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
req_data, req_params, req_state, req_statecell);
}
if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < T * N * D * H; j++) {
if (dropout_random[j] == 0) {
dx.dptr_[j] = 0;
} else {
dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
}
}
}
dy_ptr = dx.dptr_;
}
}
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gemmC2 + N * 3 * H;
DType* zt = rt + N * H;
DType* nt = zt + N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2: nullptr;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
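// Each step applies the standard GRU cell equations on top of the
// precomputed input projection gemmC1 = x * Wx^T and the per-step
// recurrent projection gemmC2 = h_{t-1} * Wh^T:
//   r_t = sigmoid(W_r x_t + b_r^x + U_r h_{t-1} + b_r^h)
//   z_t = sigmoid(W_z x_t + b_z^x + U_z h_{t-1} + b_z^h)
//   n_t = tanh(W_n x_t + b_n^x + r_t * (U_n h_{t-1} + b_n^h))
//   h_t = (1 - z_t) * n_t + z_t * h_{t-1}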
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void GruForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
}
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gateR;
DType* zt = gateZ;
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2 : nullptr;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_Mnh = Mnh + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * 3 * H;
DType* Mnht = Mnh + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
rt = back_gateR + (T - 1 - t) * N * H;
zt = back_gateZ + (T - 1 - t) * N * H;
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void GruForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
const float dropout) {
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
DType* gateR_l = rs;
DType* gateZ_l = gateR_l + L * T * D * N * H;
DType* gateN_l = gateZ_l + L * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* Mnh_l = y_l + L * T * N * H * D;
DType* dropout_random = Mnh_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* ws2 = tmp_buf + D * N * H;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
DType* y_tmp = x_ptr;
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
for (int l = 0; l < L; l++) {
if (l != 0) {
y_tmp = y_l;
y_l = y_l + T * N * H * D;
}
if (dropout > 0.0f && l > 0) {
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[(l - 1) * T * N * I + i] = 0;
y_tmp[i] = 0;
} else {
dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
y_tmp[i] = y_tmp[i] / (1.0f - dropout);
}
}
}
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
gateR_l = gateR_l + T * D * N * H;
gateZ_l = gateZ_l + T * D * N * H;
gateN_l = gateN_l + T * D * N * H;
Mnh_l = Mnh_l + T * D * N * H;
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = y_l[i];
}
}
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* rt;
DType* zt;
DType* nt;
DType* dat;
DType* dart;
DType* dar = ws; // [T, N, 3 * H]
DType* da = dar + T * N * 3 * H; // [T, N, 3 * H]
DType* dht1 = da + T * N * 3 * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* Mnht = Mnh;
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_Mnht = Mnh + T * N * H;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
DType* back_dbx = dbx + 3 * H * 2;
DType* back_dbh = dbh + 3 * H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * 3 * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * 3 * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
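// Reverse-time sweep for the forward direction. With dh_t accumulated
// from dy and the following step, the GRU backward equations are
//   dn_t = dh_t * (1 - z_t) * (1 - n_t^2)
//   dz_t = dh_t * (h_{t-1} - n_t) * z_t * (1 - z_t)
//   dr_t = dn_t * (U_n h_{t-1} + b_n^h) * r_t * (1 - r_t)
//   dh_{t-1} = dh_t * z_t + dar_t * Wh   (recurrent term added via gemm)
// `da` holds gradients w.r.t. the input projection, `dar` those w.r.t.
// the recurrent projection (dn_t scaled by r_t); Mnh caches the saved
// pre-activation U_n h_{t-1} + b_n^h from the forward pass.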
for (int t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
Mnht = Mnh + t * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
dht1[id] = dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N * T; ++j) {
dbx[i] += da[j * 3 * H + i];
dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] += tmp_dbh[i][t] + dbh[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (int t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
rt = back_gateR + t * N * H;
zt = back_gateZ + t * N * H;
nt = back_gateN + t * N * H;
back_Mnht = Mnh + (T + t) * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
nt[id]) * zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
back_dht1[id] = back_dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N * T; ++j) {
back_dbx[i] += da[j * 3 * H + i];
back_dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
template <typename DType>
void GruBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dw_ptr,
int req_data,
int req_params,
int req_state,
const float dropout) {
DType* wx = w_ptr;
DType* dwx = dw_ptr;
DType* dwh = dwx + I * H * 3;
DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* gateR_l = rs + (L - 1) * T * D * N * H;
DType* gateZ_l = gateR_l + L * T * D * N * H;
DType* gateN_l = gateZ_l + L * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* Mnh_l = y_l + L * T * N * H * D;
DType* dropout_random = Mnh_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
DType* ws2 = dx_l + T * N * D * H;
DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
+ D * I * 3 * H + D * H * 3 * H;
DType* wh_l = wx_l;
if (L == 1) {
wh_l = wh_l + I * H * 3;
} else {
wh_l = wh_l + (D * H) * H * 3;
}
DType* dhy_l = nullptr;
if (dhy_ptr)
dhy_l = dhy_ptr + (L - 1) * D * N * H;
DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
+ D * I * 3 * H + D * H * 3 * H;
DType* dwh_l = nullptr;
if (L == 1) {
dwh_l = dwx_l + I * H * 3;
} else {
dwh_l = dwx_l + (D * H) * H * 3;
}
DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
DType* dbh_l = dbx_l + 3 * H;
DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
DType* dy_l = dy_ptr;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
int inputsize = I;
DType* y_tmp = y_l - T * N * H * D;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int l = L - 1; l >= 0; --l) {
if (l == 0) {
I = inputsize;
y_tmp = x_ptr;
dx_l = dx_ptr;
} else {
I = D * H;
}
Tensor<cpu, 2, DType> hx_l = hx[l];
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
if (dropout_random[i] == 0) {
dx_l[i] = 0;
} else {
dx_l[i] = dx_l[i] / (1.0f - dropout);
}
}
}
if (l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
dy_l[i] = dx_l[i];
}
gateR_l = gateR_l - T * D * N * H;
gateZ_l = gateZ_l - T * D * N * H;
gateN_l = gateN_l - T * D * N * H;
Mnh_l = Mnh_l - T * D * N * H;
dhx_l = dhx_l - D * N * H;
if (dhy_l)
dhy_l = dhy_l - D * N * H;
y_l = y_l - T * N * H * D;
y_tmp = y_l;
if (l == 1) {
wx_l = wx_l - (inputsize + H) * H * 3 * D;
wh_l = wx_l + inputsize * 3 * H;
dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
dwh_l = dwx_l + inputsize * 3 * H;
} else {
wx_l = wx_l - (I + H) * H * 3 * D;
wh_l = wx_l + I * 3 * H;
dwx_l = dwx_l - (I + H) * H * 3 * D;
dwh_l = dwx_l + I * 3 * H;
}
dbx_l = dbx_l - D * 3 * H * 2;
dbh_l = dbx_l + 3 * H;
}
}
}
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, H]
DType* gemmC2 = gemmC1 + D * T * N * H; // N * H
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2: nullptr;
DType* back_gemmC1 = gemmC1 + T * N * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
// x * wx.T : [T * N, I] * [I, H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
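// Each step applies the elementwise vanilla-RNN recurrence on top of the
// two projections (mode 1 selects tanh, otherwise ReLU):
//   h_t = act(W x_t + b^x + U h_{t-1} + b^h)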
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
gemmC2[tb + j] + bh[0][j]);
} else {
ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] +
gemmC2[tb + j] + bh[0][j]);
}
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
+ gemmC2[tb + j] + back_bh[0][j]);
} else {
back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
+ gemmC2[tb + j] + back_bh[0][j]);
}
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l,
hy_l, mode);
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
}
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateN,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, H]
DType* gemmC2 = gemmC1 + D * T * N * H; // N * H
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2 : nullptr;
DType* back_gateN = gateN + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
// x * wx.T : [T * N, I] * [I, H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
gemmC2[tb + j] + bh[0][j]);
} else {
nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
ht[i * D * H + j] = relu(nt[tb + j]);
}
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
+ gemmC2[tb + j] + back_bh[0][j]);
} else {
nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
back_ht[i * D * H + j] = relu(nt[tb + j]);
}
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
const float dropout,
int mode) {
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
DType* gateN_l = rs;
DType* y_l = gateN_l + L * T * D * N * H;
DType* dropout_random = y_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* ws2 = tmp_buf + D * N * H;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
DType* y_tmp = x_ptr;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
for (int l = 0; l < L; l++) {
if (l != 0) {
y_tmp = y_l;
y_l = y_l + T * N * H * D;
}
if (dropout > 0.0f && l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[(l - 1) * T * N * I + i] = 0;
y_tmp[i] = 0;
} else {
dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
y_tmp[i] = y_tmp[i] / (1.0f - dropout);
}
}
}
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
gateN_l, y_l, hy_l, mode);
gateN_l = gateN_l + T * D * N * H;
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = y_l[i];
}
}
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateN,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state,
int mode) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* dart;
DType* nt;
DType* dar = ws; // [T, N, H]
DType* dht1 = dar + T * N * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_dwx = dwx + I * H + H * H;
DType* back_dwh = dwh + I * H + H * H;
DType* back_dbx = dbx + H * 2;
DType* back_dbh = dbh + H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (int t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
nt = gateN + t * N * H;
dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int id = i * H + j;
if (mode == 1) {
dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
} else {
dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;
}
dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, H] = [1, N] * [N, H]
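// both bias gradients reduce dar over all T * N rows; dbh mirrors dbx because
// bx and bh enter the gate pre-activation as a single additive sum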
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N * T; ++j) {
dbx[i] += dar[j * H + i];
dbh[i] = dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] = dbx[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (int t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
// add dy[T, N, D, H] to dhy[D, N, H]
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
nt = back_gateN + t * N * H;
dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int id = i * H + j;
if (mode == 1) {
dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
} else {
dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f;
}
back_dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, H] = [1, N] * [N, H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N * T; ++j) {
back_dbx[i] += dar[j * H + i];
back_dbh[i] = back_dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] = back_dbx[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
template <typename DType>
void VanillaRNNBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dw_ptr,
int req_data,
int req_params,
int req_state,
const float dropout,
int mode) {
DType* wx = w_ptr;
DType* dwx = dw_ptr;
DType* dwh = dwx + I * H;
DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* gateN_l = rs + (L - 1) * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* dropout_random = y_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
DType* ws2 = dx_l + T * N * D * H;
DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H
+ D * I * H + D * H * H;
DType* wh_l = wx_l;
if (L == 1) {
wh_l = wh_l + I * H;
} else {
wh_l = wh_l + (D * H) * H;
}
DType* dhy_l = nullptr;
if (dhy_ptr)
dhy_l = dhy_ptr + (L - 1) * D * N * H;
DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * H
+ D * I * H + D * H * H;
DType* dwh_l = nullptr;
if (L == 1) {
dwh_l = dwx_l + I * H;
} else {
dwh_l = dwx_l + (D * H) * H;
}
DType* dbx_l = dbx + (L - 1) * D * H * 2;
DType* dbh_l = dbx_l + H;
DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
DType* dy_l = dy_ptr;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
int inputsize = I;
DType* y_tmp = y_l - T * N * H * D;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int l = L - 1; l >= 0; --l) {
if (l == 0) {
I = inputsize;
y_tmp = x_ptr;
dx_l = dx_ptr;
} else {
I = D * H;
}
Tensor<cpu, 2, DType> hx_l = hx[l];
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l,
y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l,
dbx_l, dbh_l, req_data, req_params, req_state, mode);
if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
if (dropout_random[i] == 0) {
dx_l[i] = 0;
} else {
dx_l[i] = dx_l[i] / (1.0f - dropout);
}
}
}
if (l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
dy_l[i] = dx_l[i];
}
gateN_l = gateN_l - T * D * N * H;
dhx_l = dhx_l - D * N * H;
if (dhy_l)
dhy_l = dhy_l - D * N * H;
y_l = y_l - T * N * H * D;
y_tmp = y_l;
if (l == 1) {
wx_l = wx_l - (inputsize + H) * H * D;
wh_l = wx_l + inputsize * H;
dwx_l = dwx_l - (inputsize + H) * H * D;
dwh_l = dwx_l + inputsize * H;
} else {
wx_l = wx_l - (I + H) * H * D;
wh_l = wx_l + I * H;
dwx_l = dwx_l - (I + H) * H * D;
dwh_l = dwx_l + I * H;
}
dbx_l = dbx_l - D * H * 2;
dbh_l = dbx_l + H;
}
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
hard.h | #pragma once
template <class Tp>
class VectorNoClear : public std::vector<Tp> {
PS::U32 siz;
public:
VectorNoClear() : std::vector<Tp> (4) { siz = 0; }
//VectorNoClear(std::vector<Tp> & vec) : std::vector<Tp>(vec) { siz = std::vector<Tp>::size(); }
//VectorNoClear(VectorNoClear<Tp> & vec) {
// if ( siz < vec.siz ) resize(vec.siz);
// for (PS::S32 i=0; i<vec.siz; i++) std::vector<Tp>::at(i) = vec.at(i);
// siz = vec.siz;
//}
void clear() { siz = 0; }
void push_back(const Tp & pi) {
if ( siz < std::vector<Tp>::size() ) {
std::vector<Tp>::at(siz) = pi;
} else {
std::vector<Tp>::push_back(pi);
}
siz ++;
//assert ( siz <= std::vector<Tp>::size() );
}
void push_back(Tp && pi) {
if ( siz < std::vector<Tp>::size() ) {
//std::vector<Tp>::at(siz) = pi;
this->at(siz) = pi;
} else {
std::vector<Tp>::push_back(pi);
}
siz ++;
//assert ( siz <= std::vector<Tp>::size() );
}
void resize(const PS::U32 n) {
if ( n > std::vector<Tp>::size() ){
std::vector<Tp>::resize(n);
}
siz = n;
}
void reserve (const PS::U32 n) {
if ( n > std::vector<Tp>::capacity() ){
std::vector<Tp>::reserve(n);
}
}
PS::U32 size() const { return siz; }
PS::U32 capacity() const { return std::vector<Tp>::capacity(); }
};
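// Illustrative usage (a hypothetical sketch, not taken from the simulation):
// VectorNoClear<int> v; // starts with capacity 4, size 0
// v.push_back(1); v.push_back(2); // fills slots 0 and 1
// v.clear(); // size() -> 0, but the storage is kept
// v.push_back(3); // overwrites slot 0, no reallocation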
class HardSystem{
public:
std::vector<PS::S32> list_iso;
//std::vector<std::vector<std::pair<bool,PS::S32> > > list_multi;
//std::vector<std::vector<FPHard> > ptcl_multi;
VectorNoClear<std::vector<std::pair<bool,PS::S32> > > list_multi;
VectorNoClear<VectorNoClear<FPHard> > ptcl_multi;
std::map<PS::S32,PS::S32> mp_cluster; // cluster ID -> cluster address in HardSystem
std::vector<Collision> collision_list;
std::vector<std::pair<PS::S32,PS::S32> > frag_list;
PS::S32 n_col;
PS::S32 n_frag;
PS::F64 edisp;
PS::F64 edisp_d;
//static PS::F64 f;
PS::S32 getNumberOfClusterLocal() const { return ptcl_multi.size(); }
PS::S32 getNumberOfClusterGlobal() const {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::S32 n_cluster_loc = ptcl_multi.size();
return PS::Comm::getSum(n_cluster_loc);
#else
return ptcl_multi.size();
#endif
}
PS::S32 getNumberOfIsolatedParticleLocal() const { return list_iso.size(); }
PS::S32 getNumberOfIsolatedParticleGlobal() const {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::S32 n_ptcl_loc = list_iso.size();
return PS::Comm::getSum(n_ptcl_loc);
#else
return list_iso.size();
#endif
}
PS::S32 getNumberOfParticleLocal(){
PS::S32 n = 0;
PS::S32 size = ptcl_multi.size();
for ( PS::S32 i=0; i<size; i++ ) n += ptcl_multi[i].size();
n += list_iso.size();
return n;
}
PS::S32 getNumberOfParticleGlobal() {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::S32 n_ptcl_loc = getNumberOfParticleLocal();
return PS::Comm::getSum(n_ptcl_loc);
#else
return getNumberOfParticleLocal();
#endif
}
PS::S32 getNumberOfParticleInLargestClusterLocal(){
PS::S32 n = 0;
PS::S32 size = ptcl_multi.size();
for ( PS::S32 i=0; i<size; i++ )
if ( n < (PS::S32)(ptcl_multi[i].size()) ) n = ptcl_multi[i].size();
return n;
}
PS::S32 getNumberOfParticleInLargestClusterGlobal() {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::S32 n_loc = getNumberOfParticleInLargestClusterLocal();
return PS::Comm::getMaxValue(n_loc);
#else
return getNumberOfParticleInLargestClusterLocal();
#endif
}
PS::S32 getNumberOfFragmentLocal() const { return n_frag; }
PS::S32 getNumberOfFragmentGlobal() const {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::S32 n_frag_ = n_frag;
return PS::Comm::getSum(n_frag_);
#else
return n_frag;
#endif
}
PS::S32 getNumberOfCollisionLocal() const { return n_col; }
PS::S32 getNumberOfCollisionGlobal() const {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::S32 n_col_ = n_col;
return PS::Comm::getSum(n_col_);
#else
return n_col;
#endif
}
PS::F64 getEnergyDissipationLocal() const { return edisp; }
PS::F64 getEnergyDissipationGlobal() const {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::F64 edisp_ = edisp;
return PS::Comm::getSum(edisp_);
#else
return edisp;
#endif
}
PS::F64 getHardEnergyDissipationLocal() const { return edisp_d; }
PS::F64 getHardEnergyDissipationGlobal() const {
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
PS::F64 edisp_d_ = edisp_d;
return PS::Comm::getSum(edisp_d_);
#else
return edisp_d;
#endif
}
void showParticleID() const {
PS::S32 size = ptcl_multi.size();
for ( PS::S32 i=0; i<size; i++ ){
bool flag = ptcl_multi.at(i).at(0).inDomain;
std::cout << "Rank " << PS::Comm::getRank()
<< " Cluster " << ptcl_multi.at(i).at(0).id_cluster
<< " (" << flag << "): ";
PS::S32 sizei = ptcl_multi.at(i).size();
for ( PS::S32 j=0; j<sizei; j++ ){
std::cout << " " << ptcl_multi.at(i).at(j).id;
assert( flag == ptcl_multi.at(i).at(j).inDomain );
}
std::cout << std::endl;
}
}
void clear(){
list_iso.clear();
list_multi.clear();
ptcl_multi.clear();
mp_cluster.clear();
collision_list.clear();
frag_list.clear();
n_col = n_frag = 0;
edisp = edisp_d = 0.;
}
template <class Tpsys, class Tpsys2>
PS::S32 makeList(Tpsys & pp,
Tpsys2 & ex_pp);
template <class Tpsys, class Tpsys2, class NL, class NL2>
PS::S32 timeIntegrate(Tpsys & pp,
Tpsys2 & ex_pp,
NL & NList,
NL2 & ex_NList,
const PS::S32 istep);
static void rewriteFragmentID(PS::S32 * & id_frag_list,
Collision * & col_list,
PS::S32 n_col_tot,
PS::S32 n_frag_tot,
PS::S32 & id_next);
template <class Tpsys, class Tpsys2>
PS::S32 addFragment2ParticleSystem(Tpsys & pp,
Tpsys2 & ex_pp,
PS::S32 & id_next,
std::ofstream & fp);
};
template <class Tpsys, class Tpsys2>
inline PS::S32 HardSystem::makeList(Tpsys & pp,
Tpsys2 & ex_pp)
{
const PS::S32 n_pp = pp.getNumberOfParticleLocal();
const PS::S32 n_ex_pp = ex_pp.getNumberOfParticleLocal();
PS::S32 n_ptcl_loc = 0;
PS::S32 n_cluster_loc = 0;
PS::U32 tmp = 0;
for ( PS::S32 i=0; i<n_pp; i++ ){
if ( pp[i].neighbor ){
if ( pp[i].isSent ) continue;
auto itr = mp_cluster.find(pp[i].id_cluster);
if ( itr == mp_cluster.end() ){
mp_cluster[pp[i].id_cluster] = tmp;
list_multi.push_back(std::vector<std::pair<bool,PS::S32> >{std::make_pair(true, i)});
tmp ++;
assert( list_multi.size() == tmp );
} else {
assert( mp_cluster.at(pp[i].id_cluster) == itr->second );
list_multi.at(itr->second).push_back(std::make_pair(true, i));
}
n_ptcl_loc ++;
} else {
list_iso.push_back(i);
n_ptcl_loc ++;
}
}
for ( PS::S32 i=0; i<n_ex_pp; i++ ){
assert( ex_pp[i].neighbor > 0 );
assert( ex_pp[i].isSent );
assert( !ex_pp[i].inDomain );
auto itr = mp_cluster.find(ex_pp[i].id_cluster);
if ( itr == mp_cluster.end() ){
mp_cluster[ex_pp[i].id_cluster] = tmp;
list_multi.push_back(std::vector<std::pair<bool,PS::S32> >{std::make_pair(false, i)});
tmp ++;
assert( list_multi.size() == tmp );
} else {
assert( mp_cluster.at(ex_pp[i].id_cluster) == itr->second );
list_multi.at(itr->second).push_back(std::make_pair(false, i));
}
n_ptcl_loc ++;
}
n_cluster_loc = tmp;
assert( list_multi.size() == tmp );
ptcl_multi.resize(n_cluster_loc);
return n_ptcl_loc;
}
#if 0
template <class Tpsys, class Tpsys2, class NL, class NL2>
inline PS::S32 HardSystem::timeIntegrate(Tpsys & pp,
Tpsys2 & ex_pp,
NL & NList,
NL2 & ex_NList,
const PS::S32 istep)
{
PS::S32 n_all = list_multi.size() + list_iso.size();
PS::S32 n_ptcl_loc = 0;
#pragma omp parallel for reduction(+:n_ptcl_loc) schedule (dynamic)
for ( PS::S32 ii=0; ii<n_all; ii++ ){
PS::S32 size = list_multi.size();
if ( ii<size ){
PS::S32 i = ii;
//for ( PS::S32 i=0; i<size; i++ ){
PS::S32 n_p = list_multi.at(i).size();
PS::S32 id_cluster = 0;
std::map<PS::S32, PS::S32> id_map;
ptcl_multi[i].clear();
id_map.clear();
// Add Particle To Hard System
ptcl_multi[i].reserve(n_p);
for ( PS::S32 j=0; j<n_p; j++ ){
std::pair<bool,PS::S32> adr = list_multi.at(i).at(j);
PS::S32 id_loc = adr.second;
if ( adr.first ) {
ptcl_multi[i].push_back(FPHard(pp[id_loc]));
ptcl_multi[i][j].copyList(NList[id_loc]);
} else {
ptcl_multi[i].push_back(FPHard(ex_pp[id_loc]));
ptcl_multi[i][j].copyList(ex_NList[id_loc]);
}
if ( j==0 ) id_cluster = ptcl_multi[i][j].id_cluster;
assert ( ptcl_multi[i][j].id_cluster == id_cluster );
id_map[ptcl_multi[i][j].id] = j;
}
// Make Neighbor List
#ifdef TEST_PTCL
for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map, ptcl_multi[i]);
#else
for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map);
#endif
PS::S32 n_col_tmp = 0;
PS::S32 n_frag_tmp = 0;
PS::F64 edisp_tmp = 0.;
PS::F64 edisp_d_tmp = 0.;
std::vector<Collision> collision_list_tmp;
timeIntegrate_multi(ptcl_multi[i], 0., FPGrav::dt_tree,
n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp);
if ( n_col_tmp > 0 ){
#pragma omp critical
{
n_col += n_col_tmp;
n_frag += n_frag_tmp;
edisp += edisp_tmp;
edisp_d += edisp_d_tmp;
for ( PS::S32 j=0; j<n_col_tmp; j++ )
collision_list.push_back(collision_list_tmp.at(j));
for ( PS::S32 j=0; j<n_frag_tmp; j++ )
frag_list.push_back(std::make_pair(i, ptcl_multi[i].size()-n_frag_tmp+j));
}
}
PS::S32 sizei = ptcl_multi[i].size();
for ( PS::S32 j=0; j<sizei; j++ ) {
ptcl_multi[i][j].n_cluster = ptcl_multi[i].size();
ptcl_multi[i][j].resetTime();
}
for ( PS::S32 j=0; j<sizei-n_frag_tmp; j++ ){
std::pair<bool,PS::S32> adr = list_multi.at(i).at(j);
PS::S32 id_loc = adr.second;
if ( adr.first ) {
if ( !ptcl_multi[i][j].isDead ) assert ( pp[id_loc].id == ptcl_multi[i][j].id );
pp[id_loc] = FPGrav(ptcl_multi[i][j]);
pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size();
} else {
if ( !ptcl_multi[i][j].isDead ) assert ( ex_pp[id_loc].id == ptcl_multi[i][j].id );
ex_pp[id_loc] = FPGrav(ptcl_multi[i][j]);
ex_pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size();
}
n_ptcl_loc ++;
}
} else {
PS::S32 i = ii - list_multi.size();
//#pragma omp parallel for reduction(+:n_ptcl_loc)
//for ( PS::S32 i=0; i<list_iso.size(); i++ ){
if ( pp[list_iso[i]].getEccentricity() < 0.8 && FPGrav::eps2 == 0. ){
timeIntegrateKepler_isolated(pp[list_iso[i]], (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree);
} else {
FPHard pi = FPHard(pp[list_iso[i]]);
timeIntegrate_isolated(pi, 0., FPGrav::dt_tree);
pi.resetTime();
pp[list_iso[i]] = FPGrav(pi);
}
n_ptcl_loc ++;
pp[list_iso[i]].n_cluster = 1;
}
}
return n_ptcl_loc;
}
#else
template <class Tpsys, class Tpsys2, class NL, class NL2>
inline PS::S32 HardSystem::timeIntegrate(Tpsys & pp,
Tpsys2 & ex_pp,
NL & NList,
NL2 & ex_NList,
const PS::S32 istep)
{
PS::S32 n_ptcl_loc = 0;
const PS::S32 large_cluster_size = 64;
PS::S32 size = list_multi.size();
std::vector<PS::S32> small_cluster, large_cluster;
small_cluster.clear();
large_cluster.clear();
for ( PS::S32 i=0; i<size; i++ ){
if ( list_multi.at(i).size() < large_cluster_size ){
small_cluster.push_back(i);
} else {
large_cluster.push_back(i);
}
}
PS::S32 n_small = small_cluster.size();
PS::S32 n_large = large_cluster.size();
PS::S32 n_all = n_small + list_iso.size();
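// Strategy: clusters with at least large_cluster_size members are integrated
// one at a time with OpenMP inside the cluster (timeIntegrate_multi_omp);
// small clusters and isolated particles are instead spread across threads.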
///////////////////////////////////
/// Integrate Large Cluster ///
///////////////////////////////////
for ( PS::S32 ii=0; ii<n_large; ii++ ){
PS::S32 i = large_cluster.at(ii);
PS::S32 n_p = list_multi.at(i).size();
PS::S32 id_cluster = 0;
std::map<PS::S32, PS::S32> id_map;
ptcl_multi[i].clear();
id_map.clear();
assert( large_cluster_size <= n_p );
// Add Particle To Hard System
ptcl_multi[i].reserve(n_p);
for ( PS::S32 j=0; j<n_p; j++ ){
std::pair<bool,PS::S32> adr = list_multi.at(i).at(j);
PS::S32 id_loc = adr.second;
if ( adr.first ) {
ptcl_multi[i].push_back(FPHard(pp[id_loc]));
ptcl_multi[i][j].copyList(NList[id_loc]);
} else {
ptcl_multi[i].push_back(FPHard(ex_pp[id_loc]));
ptcl_multi[i][j].copyList(ex_NList[id_loc]);
}
if ( j==0 ) id_cluster = ptcl_multi[i][j].id_cluster;
assert ( ptcl_multi[i][j].id_cluster == id_cluster );
id_map[ptcl_multi[i][j].id] = j;
}
// Make Neighbor List
#ifdef TEST_PTCL
#pragma omp parallel for
for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map, ptcl_multi[i]);
#else
#pragma omp parallel for
for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map);
#endif
PS::S32 n_col_tmp = 0;
PS::S32 n_frag_tmp = 0;
PS::F64 edisp_tmp = 0.;
PS::F64 edisp_d_tmp = 0.;
std::vector<Collision> collision_list_tmp;
timeIntegrate_multi_omp(ptcl_multi[i], 0., FPGrav::dt_tree,
n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp);
if ( n_col_tmp > 0 ){
n_col += n_col_tmp;
n_frag += n_frag_tmp;
edisp += edisp_tmp;
edisp_d += edisp_d_tmp;
for ( PS::S32 j=0; j<n_col_tmp; j++ )
collision_list.push_back(collision_list_tmp.at(j));
for ( PS::S32 j=0; j<n_frag_tmp; j++ )
frag_list.push_back(std::make_pair(i, ptcl_multi[i].size()-n_frag_tmp+j));
}
PS::S32 sizei = ptcl_multi[i].size();
#pragma omp parallel for
for ( PS::S32 j=0; j<sizei; j++ ) {
ptcl_multi[i][j].n_cluster = ptcl_multi[i].size();
ptcl_multi[i][j].resetTime();
}
#pragma omp parallel for reduction(+:n_ptcl_loc)
for ( PS::S32 j=0; j<sizei-n_frag_tmp; j++ ){
std::pair<bool,PS::S32> adr = list_multi.at(i).at(j);
PS::S32 id_loc = adr.second;
if ( adr.first ) {
if ( !ptcl_multi[i][j].isDead ) assert ( pp[id_loc].id == ptcl_multi[i][j].id );
pp[id_loc] = FPGrav(ptcl_multi[i][j]);
pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size();
} else {
if ( !ptcl_multi[i][j].isDead ) assert ( ex_pp[id_loc].id == ptcl_multi[i][j].id );
ex_pp[id_loc] = FPGrav(ptcl_multi[i][j]);
ex_pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size();
}
n_ptcl_loc ++;
}
}
//////////////////////////////////////////////////////
/// Integrate Small Cluster & Isolated Particle ///
//////////////////////////////////////////////////////
#pragma omp parallel for reduction(+:n_ptcl_loc) schedule (dynamic)
for ( PS::S32 ii=0; ii<n_all; ii++ ){
if ( ii<n_small ){
PS::S32 i = small_cluster.at(ii);
PS::S32 n_p = list_multi.at(i).size();
PS::S32 id_cluster = 0;
std::map<PS::S32, PS::S32> id_map;
ptcl_multi[i].clear();
id_map.clear();
assert( large_cluster_size > n_p );
// Add Particle To Hard System
ptcl_multi[i].reserve(n_p);
for ( PS::S32 j=0; j<n_p; j++ ){
std::pair<bool,PS::S32> adr = list_multi.at(i).at(j);
PS::S32 id_loc = adr.second;
if ( adr.first ) {
ptcl_multi[i].push_back(FPHard(pp[id_loc]));
ptcl_multi[i][j].copyList(NList[id_loc]);
} else {
ptcl_multi[i].push_back(FPHard(ex_pp[id_loc]));
ptcl_multi[i][j].copyList(ex_NList[id_loc]);
}
if ( j==0 ) id_cluster = ptcl_multi[i][j].id_cluster;
assert ( ptcl_multi[i][j].id_cluster == id_cluster );
id_map[ptcl_multi[i][j].id] = j;
}
// Make Neighbor List
#ifdef TEST_PTCL
for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map, ptcl_multi[i]);
#else
for ( PS::S32 j=0; j<n_p; j++ ) ptcl_multi[i][j].makeHardList(id_map);
#endif
PS::S32 n_col_tmp = 0;
PS::S32 n_frag_tmp = 0;
PS::F64 edisp_tmp = 0.;
PS::F64 edisp_d_tmp = 0.;
std::vector<Collision> collision_list_tmp;
//timeIntegrate_multi(ptcl_multi[i], (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree, f,
// n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp);
timeIntegrate_multi(ptcl_multi[i], 0., FPGrav::dt_tree,
n_col_tmp, n_frag_tmp, edisp_tmp, edisp_d_tmp, collision_list_tmp);
if ( n_col_tmp > 0 ){
#pragma omp critical
{
n_col += n_col_tmp;
n_frag += n_frag_tmp;
edisp += edisp_tmp;
edisp_d += edisp_d_tmp;
for ( PS::S32 j=0; j<n_col_tmp; j++ )
collision_list.push_back(collision_list_tmp.at(j));
for ( PS::S32 j=0; j<n_frag_tmp; j++ )
frag_list.push_back(std::make_pair(i, ptcl_multi[i].size()-n_frag_tmp+j));
}
}
PS::S32 sizei = ptcl_multi[i].size();
for ( PS::S32 j=0; j<sizei; j++ ) {
ptcl_multi[i][j].n_cluster = ptcl_multi[i].size();
ptcl_multi[i][j].resetTime();
}
for ( PS::S32 j=0; j<sizei-n_frag_tmp; j++ ){
std::pair<bool,PS::S32> adr = list_multi.at(i).at(j);
PS::S32 id_loc = adr.second;
if ( adr.first ) {
if ( !ptcl_multi[i][j].isDead ) assert ( pp[id_loc].id == ptcl_multi[i][j].id );
pp[id_loc] = FPGrav(ptcl_multi[i][j]);
pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size();
} else {
if ( !ptcl_multi[i][j].isDead ) assert ( ex_pp[id_loc].id == ptcl_multi[i][j].id );
ex_pp[id_loc] = FPGrav(ptcl_multi[i][j]);
ex_pp[id_loc].neighbor = ptcl_multi[i][j].n_list.size();
}
n_ptcl_loc ++;
}
} else {
PS::S32 i = ii - n_small;
if ( pp[list_iso[i]].getEccentricity() < 0.8 && FPGrav::eps2 == 0. ){
timeIntegrateKepler_isolated(pp[list_iso[i]], (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree);
} else {
FPHard pi = FPHard(pp[list_iso[i]]);
timeIntegrate_isolated(pi, 0., FPGrav::dt_tree);
//timeIntegrate_isolated(pi, (istep-1)*FPGrav::dt_tree, istep*FPGrav::dt_tree);
pi.resetTime();
pp[list_iso[i]] = FPGrav(pi);
}
n_ptcl_loc ++;
pp[list_iso[i]].n_cluster = 1;
}
}
return n_ptcl_loc;
}
#endif
inline void HardSystem::rewriteFragmentID(PS::S32 * & id_frag_list,
Collision * & col_list,
PS::S32 n_col_tot,
PS::S32 n_frag_tot,
PS::S32 & id_next)
{
std::map<PS::S32, PS::S32> id_old2new;
id_old2new.clear();
for ( PS::S32 i=0; i<n_col_tot; i++ ){
PS::S32 n_fragi = col_list[i].getNumberOfFragment();
PS::S32 id_fragi = col_list[i].getFragmentID();
if ( n_fragi == 0 ) continue;
for ( PS::S32 j=0; j<n_fragi; j++ ){
id_old2new[id_fragi - j] = id_next;
id_next ++;
}
}
for ( PS::S32 i=0; i<n_frag_tot; i++ )
if ( id_frag_list[i] < 0 ) id_frag_list[i] = id_old2new.at(id_frag_list[i]);
for ( PS::S32 i=0; i<n_col_tot; i++ )
col_list[i].setNewFragmentID(id_old2new);
}
template <class Tpsys, class Tpsys2>
inline PS::S32 HardSystem::addFragment2ParticleSystem(Tpsys & pp,
Tpsys2 & ex_pp,
PS::S32 & id_next,
std::ofstream & fp)
{
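// Flow: gather per-rank collision/fragment counts on rank 0, gather the
// collision records and provisional (negative) fragment IDs, rewrite them
// into globally unique IDs on rank 0, scatter the new IDs back, and finally
// append the fragments to the local particle system.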
const PS::S32 n_proc = PS::Comm::getNumberOfProc();
PS::S32 * n_col_list = nullptr;
PS::S32 * n_frag_list = nullptr;
Collision * col_list_tot = nullptr;
PS::S32 * id_frag_list = nullptr;
PS::S32 * id_frag_loc = nullptr;
PS::S32 * col_recv = nullptr;
PS::S32 * frag_recv = nullptr;
id_frag_loc = new PS::S32[n_frag];
for ( PS::S32 i=0; i<n_frag; i++ ){
std::pair<PS::S32, PS::S32> adr = frag_list.at(i);
id_frag_loc[i] = ptcl_multi[adr.first][adr.second].id;
}
if ( PS::Comm::getRank() == 0 ){
n_col_list = new PS::S32[n_proc];
n_frag_list = new PS::S32[n_proc];
col_recv = new PS::S32[n_proc];
frag_recv = new PS::S32[n_proc];
}
// Send Number of Collision & Fragments
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
MPI_Gather(&n_col, 1, PS::GetDataType(n_col),
n_col_list, 1, PS::GetDataType(*n_col_list), 0, MPI_COMM_WORLD);
MPI_Gather(&n_frag, 1, PS::GetDataType(n_frag),
n_frag_list, 1, PS::GetDataType(*n_frag_list), 0, MPI_COMM_WORLD);
#else
n_col_list[0] = n_col;
n_frag_list[0] = n_frag;
#endif
//PS::Comm::gather(&n_col, 1, n_col_list);
//PS::Comm::gather(&n_frag, 1, n_frag_list);
PS::S32 n_col_tot = 0;
PS::S32 n_frag_tot = 0;
if ( PS::Comm::getRank() == 0 ){
PS::S32 tmp_col = 0;
PS::S32 tmp_frag = 0;
for ( PS::S32 i=0; i<n_proc; i++ ){
col_recv[i] = tmp_col;
frag_recv[i] = tmp_frag;
tmp_col += n_col_list[i];
tmp_frag += n_frag_list[i];
}
//col_recv[n_proc] = tmp_col;
//frag_recv[n_proc] = tmp_frag;
n_col_tot = tmp_col;
n_frag_tot = tmp_frag;
col_list_tot = new Collision[n_col_tot];
id_frag_list = new PS::S32[n_frag_tot];
}
// Send Collision Information & Fragments ID
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
MPI_Gatherv(&collision_list[0], n_col, PS::GetDataType(collision_list[0]),
col_list_tot, n_col_list, col_recv, PS::GetDataType(*col_list_tot), 0, MPI_COMM_WORLD);
MPI_Gatherv(id_frag_loc, n_frag, PS::GetDataType(*id_frag_loc),
id_frag_list, n_frag_list, frag_recv, PS::GetDataType(*id_frag_list), 0, MPI_COMM_WORLD);
#else
for(PS::S32 i=0; i<n_col; i++) col_list_tot[i] = collision_list[i];
for(PS::S32 i=0; i<n_frag; i++) id_frag_list[i] = id_frag_loc[i];
#endif
//PS::Comm::gatherV(&collision_list[0], n_col, col_list_tot, n_col_list, col_recv);
//PS::Comm::gatherV(id_frag_loc, n_frag, id_frag_list, n_frag_list, frag_recv);
// Rewrite Fragments ID
if ( PS::Comm::getRank() == 0 ){
rewriteFragmentID(id_frag_list, col_list_tot, n_col_tot, n_frag_tot, id_next);
}
// Return Fragments ID
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
MPI_Scatterv(id_frag_list, n_frag_list, frag_recv, PS::GetDataType(*id_frag_list),
id_frag_loc, n_frag, PS::GetDataType(*id_frag_loc), 0, MPI_COMM_WORLD);
#else
for(int i=0; i<n_frag_list[0]; i++) id_frag_loc[i] = id_frag_list[i];
#endif
//PS::Comm::scatterV(id_frag_list, n_frag_list, frag_recv, id_frag_loc, n_frag);
assert( (PS::S32)(frag_list.size()) == n_frag );
for ( PS::S32 i=0; i<n_frag; i++ ){
std::pair<PS::S32, PS::S32> adr = frag_list.at(i);
if ( ptcl_multi[adr.first][adr.second].isMerged ) {
PS::S32 sizef = list_multi.at(adr.first).size();
for ( PS::S32 j=0; j<sizef; j++ ) {
if ( ptcl_multi[adr.first][j].id == ptcl_multi[adr.first][adr.second].id &&
j != adr.second ) {
std::pair<bool,PS::S32> adr_j = list_multi.at(adr.first).at(j);
if ( adr_j.first ) {
pp[adr_j.second].id = id_frag_loc[i];
assert ( pp[adr_j.second].isDead );
} else {
ex_pp[adr_j.second].id = id_frag_loc[i];
assert ( ex_pp[adr_j.second].isDead );
}
ptcl_multi[adr.first][j].id = id_frag_loc[i];
assert ( ptcl_multi[adr.first][j].isDead );
}
}
}
ptcl_multi[adr.first][adr.second].id = id_frag_loc[i];
pp.addOneParticle(ptcl_multi[adr.first][adr.second]);
assert( ptcl_multi[adr.first][adr.second].time_c == 0. );
}
PS::Comm::broadcast(&id_next, 1);
if ( PS::Comm::getRank() == 0 ){
// Output Collision Information
for ( PS::S32 i=0; i<n_col_tot; i++ ) col_list_tot[i].write2File(fp);
delete [] n_col_list;
delete [] n_frag_list;
delete [] col_list_tot;
delete [] id_frag_list;
delete [] col_recv;
delete [] frag_recv;
}
delete [] id_frag_loc;
return n_frag_tot;
}
|
GB_unop__abs_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_uint32_uint32
// op(A') function: GB_unop_tran__abs_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_uint32_uint32
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_uint32_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sparseOptimizedBlocksJacobi.h | //
// Created by mbarb on 23/02/2018.
//
#ifndef PARALLELITERATIVE_SPARSEOPTIMIZEDBLOCKSJACOBI_H
#define PARALLELITERATIVE_SPARSEOPTIMIZEDBLOCKSJACOBI_H
#include "Eigen"
#include "utils.h"
#include "sparseParallelJacobi.h"
namespace Iterative {
template <typename Scalar>
class sparseOptimizedBlocksJacobi : public sparseParallelJacobi<Scalar> {
public:
/**
*
* @param A linear system matrix
* @param b known term vector
* @param iterations max number of iterations
* @param tolerance min error tolerated
* @param workers number of threads
* @param blockSize size of the block
*/
explicit sparseOptimizedBlocksJacobi(
const Eigen::SparseMatrix<Scalar>& A,
const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b,
const ulonglong iterations,
const Scalar tolerance,
const ulong workers = 0L,
const ulonglong blockSize = 0L) :
sparseParallelJacobi<Scalar>(A, b, iterations, tolerance, workers) {
this->blockSize = blockSize;
if (blockSize == 0)
this->blockSize = std::max(ulong(this->A.cols() / std::max(workers, (ulong)1L)), (ulong)1L);
splitter();
}
/**
*
* @return
*/
const Eigen::ColumnVector<Scalar, Eigen::Dynamic> &solve() {
Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldSolution(this->solution);
std::vector<Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>> inverses(blocks.size());
Eigen::Matrix<Scalar,Eigen::Dynamic, Eigen::Dynamic> I(this->blockSize,this->blockSize);
Eigen::SimplicialLDLT<Eigen::SparseMatrix<Scalar>> solver;
I.setIdentity();
// compute the inverses of the diagonal blocks and store them for reuse
#pragma omp parallel for firstprivate(I) private(solver)
for (int i = 0; i < blocks.size()-1; ++i) {
Eigen::SparseMatrix<Scalar> block = this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols,
blocks[i].rows);
solver.compute(block);
inverses[i] = solver.solve(I);
}
{
Eigen::SparseMatrix<Scalar> block = this->A.block(blocks.back().startCol, blocks.back().startRow,
blocks.back().cols,blocks.back().rows);
if(block.cols()!=this->blockSize || block.rows()!=this->blockSize){
I.resize(block.rows(), block.cols());
I.setIdentity();
}
solver.compute(block);
inverses.back() = solver.solve(I);
}
// start iterations
std::vector<int> index;
Eigen::ColumnVector<Scalar, Eigen::Dynamic> Ax =
Eigen::ColumnVector<Scalar, Eigen::Dynamic>::Zero(this->solution.rows(),this->solution.cols());
for (this->iteration=0L; this->iteration < this->iterations; ++this->iteration) {
Ax = this->A*oldSolution;
#pragma omp parallel for schedule(dynamic)
for (auto i = 0; i < inverses.size(); ++i) {
// segment of the previous-iteration solution handled by this block
auto oldBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
// approximate the solution using the inverse and the solution at the previous iteration
Eigen::ColumnVector<Scalar,Eigen::Dynamic> correction =
Eigen::ColumnVector<Scalar,Eigen::Dynamic>::Zero(oldSolution.rows(), oldSolution.cols());
for (auto col = blocks[i].startCol; col < blocks[i].startCol+blocks[i].cols; ++col) {
correction+=this->A.col(col)*oldSolution[col];
}
block = inverses[i] * (this->b - Ax + correction).segment(blocks[i].startCol, blocks[i].cols);
if ((oldBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) {
#pragma omp critical
index.emplace_back(i);
}
}
if (!index.empty()) {
std::sort(index.rbegin(), index.rend());
for (auto i : index) {
blocks.erase(blocks.begin() + i);
inverses.erase(inverses.begin() + i);
}
if (inverses.empty()) break;
index.clear();
}
std::swap(this->solution, oldSolution);
}
std::cout << this->iteration << std::endl;
return this->solution;
}
protected:
ulonglong blockSize;
std::vector<Index> blocks;
void splitter() {
for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong)this->A.cols() - i),
i, std::min(blockSize, (ulonglong)this->A.rows() - i)));
}
}
private:
};
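// Illustrative usage (a sketch; assumes the Eigen::ColumnVector alias and
// the ulonglong typedef come from utils.h as elsewhere in this header):
// Eigen::SparseMatrix<double> A = /* n-by-n system matrix */;
// Eigen::ColumnVector<double, Eigen::Dynamic> b = /* known term */;
// sparseOptimizedBlocksJacobi<double> jacobi(A, b, 1000, 1e-8, 4, 64);
// auto x = jacobi.solve();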
}
#endif //PARALLELITERATIVE_SPARSEOPTIMIZEDBLOCKSJACOBI_H
|
dsyrk.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsyrk.c, normal z -> d, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_syrk
*
* Performs one of the symmetric rank k operations
*
* \f[ C = \alpha A \times A^T + \beta C, \f]
* or
* \f[ C = \alpha A^T \times A + \beta C, \f]
*
* where alpha and beta are scalars, C is an n-by-n symmetric
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
* - PlasmaTrans: \f[ C = \alpha A^T \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaTrans, lda >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dsyrk
* @sa plasma_csyrk
* @sa plasma_ssyrk
*
******************************************************************************/
int plasma_dsyrk(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
double alpha, double *pA, int lda,
double beta, double *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans)) {
plasma_error("illegal value of trans");
return -2;
}
if (n < 0) {
plasma_error("illegal value of n");
return -3;
}
if (k < 0) {
plasma_error("illegal value of k");
return -4;
}
int am, an;
if (trans == PlasmaNoTrans) {
am = n;
an = k;
}
else {
am = k;
an = n;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldc < imax(1, n)) {
plasma_error("illegal value of ldc");
return -10;
}
// quick return
if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_syrk(plasma, PlasmaRealDouble, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
am, an, 0, 0, am, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_dsyrk(uplo, trans,
alpha, A,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
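// A minimal call sketch (illustrative, not taken from the PLASMA docs):
// assuming plasma_init() has been called and A (n-by-k) and C (n-by-n) are
// user-allocated column-major arrays,
//
// int info = plasma_dsyrk(PlasmaLower, PlasmaNoTrans, n, k,
// 1.0, A, lda, 0.0, C, ldc);
// // info == PlasmaSuccess on success; lower part of C := A * A^T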
/***************************************************************************//**
*
* @ingroup plasma_syrk
*
* Performs rank k update.
* Non-blocking tile version of plasma_dsyrk().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
* - PlasmaTrans: \f[ C = \alpha A^T \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dsyrk
* @sa plasma_omp_dsyrk
* @sa plasma_omp_csyrk
* @sa plasma_omp_ssyrk
*
******************************************************************************/
void plasma_omp_dsyrk(plasma_enum_t uplo, plasma_enum_t trans,
double alpha, plasma_desc_t A,
double beta, plasma_desc_t C,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans)) {
plasma_error("illegal value of trans");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(C) != PlasmaSuccess) {
plasma_error("invalid C");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
int k = trans == PlasmaNoTrans ? A.n : A.m;
if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return;
// Call the parallel function.
plasma_pdsyrk(uplo, trans,
alpha, A,
beta, C,
sequence, request);
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
// defaults (assumed values) so the problem sizes are initialized even when
// no command-line arguments are given
int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile sizes; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
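/* The tiled nest below is the CLooG-generated schedule of the logical update
* A[(t+1)%2][i][j][k] = coef[0][i][j][k]*A[t%2][i][j][k]
* + coef[1][i][j][k]*A[t%2][i-1][j][k]
* + coef[2][i][j][k]*A[t%2][i][j-1][k]
* + coef[3][i][j][k]*A[t%2][i][j][k-1]
* + coef[4][i][j][k]*A[t%2][i+1][j][k]
* + coef[5][i][j][k]*A[t%2][i][j+1][k]
* + coef[6][i][j][k]*A[t%2][i][j][k+1]
* applied for t = 0..Nt-2 over the interior points. */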
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) {
for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(8*t3+Nx+4,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
* \brief multi-device communication
*/
class Comm {
public:
Comm() {
pinned_ctx_ = Context::CPUPinned(0);
}
virtual ~Comm() { }
/**
* \brief init key with the data shape and storage shape
*/
virtual void Init(int key, const NDArrayStorageType stype,
const TShape& shape, int dtype = mshadow::kFloat32) = 0;
/**
* \brief returns src[0] + .. + src[src.size()-1]
*/
virtual const NDArray& Reduce(
int key, const std::vector<NDArray>& src, int priority) = 0;
/**
* \brief copy from src to dst[i] for every i
*/
virtual void Broadcast(
int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) = 0;
/**
* \brief broadcast src to dst[i] with target row_ids for every i
* \param key the identifier key for the stored ndarray
* \param src the source row_sparse ndarray to broadcast
* \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
where the row_ids are expected to be unique and sorted in row_id.data()
* \param priority the priority of the operation
*/
virtual void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) = 0;
/**
* \brief return a pinned context
*/
Context pinned_ctx() const {
return pinned_ctx_;
}
/**
* \brief Sets gradient compression parameters to be able to
* perform reduce with compressed gradients
*/
void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
gc_ = gc;
}
protected:
Context pinned_ctx_;
std::shared_ptr<GradientCompression> gc_;
};
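// A minimal usage sketch (hypothetical names; `comm` points at a concrete
// subclass such as CommCPU below): initialize a key once, reduce the
// per-device gradients into one array, then broadcast the merged result.
//
//   comm->Init(0, kDefaultStorage, grads[0].shape(), grads[0].dtype());
//   const NDArray& merged = comm->Reduce(0, grads, /*priority=*/0);
//   comm->Broadcast(0, merged, weight_ptrs, /*priority=*/0);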
/**
* \brief an implementation of Comm that first copies data to CPU memory, and then
* reduces there
*/
class CommCPU : public Comm {
public:
CommCPU() {
nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
// TODO(junwu) delete the following data member, now for benchmark only
is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
}
virtual ~CommCPU() { }
void Init(int key, const NDArrayStorageType stype, const TShape& shape,
int type = mshadow::kFloat32) override {
// Delayed allocation - the dense merged buffer might not be used at all if push()
// only sees sparse arrays
bool delay_alloc = true;
merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
}
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
auto& buf = merge_buf_[key];
const auto stype = src[0].storage_type();
// avoid the extra copy for the single-device case; note this may cause
// problems for unusual usage patterns of kvstore
if (src.size() == 1) {
if (stype == kDefaultStorage) {
return src[0];
} else {
// With 'local' kvstore, we could store the weight on CPU while computing
// the gradient on GPU when the weight is extremely large.
// To avoid copying the weight to the same context as the gradient,
// we always copy the gradient to the merged buf.
NDArray& merged = buf.merged_buf(stype);
CopyFromTo(src[0], &merged, priority);
return merged;
}
}
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
std::vector<Engine::VarHandle> const_vars(src.size() - 1);
std::vector<NDArray> reduce(src.size());
CopyFromTo(src[0], &buf_merged, priority);
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size()-1);
for (size_t j = 0; j < src.size() - 1; ++j) {
// allocate copy buffer
buf.copy_buf[j] = NDArray(
src[0].shape(), pinned_ctx_, false, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 1; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
reduce[i] = buf.copy_buf[i-1];
const_vars[i-1] = reduce[i].var();
}
Engine::Get()->PushAsync(
[reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
ReduceSumCPU(reduce);
on_complete();
}, Context::CPU(), const_vars, {reduce[0].var()},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
} else {
// sparse reduce
std::vector<Engine::VarHandle> const_vars(src.size());
std::vector<NDArray> reduce(src.size());
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(
src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
const_vars[i] = reduce[i].var();
}
Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
ResourceRequest(ResourceRequest::kTempSpace));
Engine::Get()->PushAsync(
[reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
NDArray out = buf_merged;
is_serial_push_?
ReduceSumCPUExSerial(reduce, &out)
: mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
on_complete();
}, Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
}
return buf_merged;
}
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
int mask = src.ctx().dev_mask();
if (mask == Context::kCPU) {
for (auto d : dst) CopyFromTo(src, d, priority);
} else {
// First copy data to pinned_ctx, then broadcast.
// Note that kv.init initializes the data on pinned_ctx.
// This branch indicates that push() was called with ndarrays on GPUs,
// so the source lives in a GPU context.
// It also indicates that the buffers were already initialized during push().
auto& buf = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf, priority);
for (auto d : dst) CopyFromTo(buf, d, priority);
}
}
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
using namespace mshadow;
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with src on gpu context not supported";
for (size_t i = 0; i < dst.size(); ++i) {
NDArray* out = dst[i].first;
NDArray row_id = dst[i].second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with row_indices on gpu context not supported";
// retain according to unique indices
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
src.dtype(), src.aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
" refers to the same NDArray as the one stored in KVStore. "
"Performing row_sparse_pull() with such an output is going to change the "
"data stored in KVStore. An incorrect result may be generated the "
"next time row_sparse_pull() is called. To avoid this issue, "
"consider creating a new NDArray buffer to store the output.");
}
Engine::Get()->PushAsync(
[=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
NDArray temp = retained_cpu; // get rid of the const qualifier
op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo,
&temp);
on_complete();
}, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
FnProperty::kNormal, priority, "KVStoreSparseRetain");
// if retained_cpu == out, CopyFromTo will ignore the copy operation
CopyFromTo(retained_cpu, out, priority);
}
}
private:
// reduce sum into in_data[0]
inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
std::vector<DType*> dptr(in_data.size());
for (size_t i = 0; i < in_data.size(); ++i) {
TBlob data = in_data[i].data();
CHECK(data.CheckContiguous());
dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
}
size_t total = in_data[0].shape().Size();
ReduceSumCPUImpl(dptr, total);
});
}
// serial implementation of reduce sum for row sparse NDArray.
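// For example (hypothetical inputs): row indices {0, 2} and {2, 5} are
// gathered into {0, 2, 2, 5}, then sorted and deduplicated to {0, 2, 5};
// output row 0 comes from the first input only, row 2 is the sum of both
// inputs, and row 5 comes from the second input only.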
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
using namespace rowsparse;
using namespace mshadow;
auto stype = out->storage_type();
CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
size_t total_num_rows = 0;
size_t num_in = in.size();
// skip the ones with empty indices and values
std::vector<bool> skip(num_in, false);
// the values tensor of the inputs
MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
// offset to the values tensor of all inputs
std::vector<size_t> offsets(num_in, 0);
std::vector<size_t> num_rows(num_in, 0);
for (size_t i = 0; i < num_in; i++) {
if (!in[i].storage_initialized()) {
skip[i] = true;
continue;
}
auto size = in[i].aux_shape(kIdx).Size();
num_rows[i] = size;
total_num_rows += size;
in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
}
std::vector<IType> indices;
indices.reserve(total_num_rows);
// gather indices from all inputs
for (size_t i = 0; i < num_in; i++) {
for (size_t j = 0; j < num_rows[i]; j++) {
indices.emplace_back(in_indices[i][j]);
}
}
CHECK_EQ(indices.size(), total_num_rows);
// dedup indices
std::sort(indices.begin(), indices.end());
indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
// the ones left are the unique non-zero rows
size_t nnr = indices.size();
// allocate memory for output
out->CheckAndAlloc({Shape1(nnr)});
auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
auto val_data = out->data().FlatTo2D<cpu, DType>();
for (size_t i = 0; i < nnr; i++) {
// copy indices back
idx_data[i] = indices[i];
bool zeros = true;
for (size_t j = 0; j < num_in; j++) {
if (skip[j]) continue;
size_t offset = offsets[j];
if (offset < num_rows[j]) {
if (indices[i] == in_indices[j][offset]) {
if (zeros) {
Copy(val_data[i], in_vals[j][offset], nullptr);
zeros = false;
} else {
val_data[i] += in_vals[j][offset];
}
offsets[j] += 1;
}
}
}
}
});
});
}
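// The helper below sums up to four source arrays per iteration into dptr[0];
// the switch handles the remaining 1-3 arrays on the last iteration. E.g.
// with six arrays, the first pass adds dptr[1..4] and the second adds dptr[5].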
template<typename DType>
inline static void ReduceSumCPU(
const std::vector<DType*> &dptr, size_t offset, index_t size) {
using namespace mshadow; // NOLINT(*)
Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
for (size_t i = 1; i < dptr.size(); i+=4) {
switch (dptr.size() - i) {
case 1: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
in_0 += in_1;
break;
}
case 2: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
in_0 += in_1 + in_2;
break;
}
case 3: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3;
break;
}
default: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3 + in_4;
break;
}
}
}
}
template<typename DType>
inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
long ntask = (total + step - 1) / step; // NOLINT(*)
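// Example: with step = 4096 (the 4<<10 cap) and total = 10000, ntask = 3
// and the chunks are [0,4096), [4096,8192) and [8192,10000).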
if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
ReduceSumCPU(dptr, 0, total);
} else {
#pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
for (long j = 0; j < ntask; ++j) { // NOLINT(*)
size_t k = static_cast<size_t>(j);
size_t begin = std::min(k * step, total);
size_t end = std::min((k + 1) * step, total);
if (j == ntask - 1) CHECK_EQ(end, total);
ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
}
}
}
/// \brief temporary space for pushing and pulling
struct BufferEntry {
/// \brief the merged value
NDArray merged;
/// \brief the cpu buffer for gpu data
std::vector<NDArray> copy_buf;
/// \brief the merged buffer for the given storage type
inline NDArray& merged_buf(NDArrayStorageType stype) {
if (stype == kDefaultStorage) {
return merged;
}
CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
// check if sparse_merged is initialized
if (sparse_merged.is_none()) {
CHECK(!merged.is_none());
sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
true, merged.dtype());
}
return sparse_merged;
}
private:
/// \brief the sparse merged value
NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;
size_t bigarray_bound_;
int nthread_reduction_;
bool is_serial_push_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidth is larger than the
* device-to-CPU bandwidth, which is often true for 4 or 8 GPUs. But it uses
* more device memory.
*/
class CommDevice : public Comm {
public:
CommDevice() {
inited_ = false;
}
virtual ~CommDevice() { }
void Init(int key, const NDArrayStorageType stype, const TShape& shape,
int dtype = mshadow::kFloat32) override {
sorted_key_attrs_.emplace_back(key, shape, dtype);
}
void InitBuffersAndComm(const std::vector<NDArray>& src) {
if (!inited_) {
std::vector<Context> devs;
for (const auto& a : src) {
devs.push_back(a.ctx());
}
InitMergeBuffer(devs);
if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
EnableP2P(devs);
}
}
}
const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src,
int priority) {
auto& buf = merge_buf_[key];
std::vector<NDArray> reduce(src.size());
const NDArrayStorageType stype = src[0].storage_type();
NDArray& buf_merged = buf.merged_buf(stype);
if (buf.copy_buf.empty()) {
// initialize buffer for copying during reduce
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype());
}
}
CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
}
ElementwiseSum(reduce, &buf_merged, priority);
return buf_merged;
}
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
// when this reduce is called from kvstore_dist, gc is not set,
// so we don't do compression twice in dist_sync_device
if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
return ReduceCompressed(key, src, priority);
}
// avoid the extra copy for the single-device case; note this may cause
// problems for unusual usage patterns of kvstore
if (src.size() == 1) {
return src[0];
}
InitBuffersAndComm(src);
auto& buf = merge_buf_[key];
const NDArrayStorageType stype = src[0].storage_type();
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
CopyFromTo(src[0], &buf_merged, priority);
std::vector<NDArray> reduce(src.size());
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
// TODO(mli) this results in large device memory usage for huge ndarrays,
// such as the largest fullc in VGG. consider doing segment reduce with
// NDArray.Slice or gpu direct memory access. for the latter, we need to
// remove some ctx checks, and it also reduces perf by about 20%
buf.copy_buf.resize(src.size()-1);
for (size_t i = 0; i < src.size()-1; ++i) {
buf.copy_buf[i] = NDArray(
buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype());
}
}
for (size_t i = 0; i < src.size()-1; ++i) {
CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
reduce[i+1] = buf.copy_buf[i];
}
ElementwiseSum(reduce, &buf_merged, priority);
} else {
// sparse reduce
buf_merged = ReduceRowSparse(key, src, priority);
}
return buf_merged;
}
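// Gradient-compressed reduce: each device quantizes its gradient into a
// small send buffer (the quantization error is kept in `residual`, which is
// presumably fed back into the next round), the compressed buffers are
// copied across devices, dequantized into copy_buf, and finally summed.
// This trades accuracy for communication bandwidth.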
const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
int priority) {
InitBuffersAndComm(src);
auto& buf = merge_buf_[key];
std::vector<NDArray> reduce(src.size());
if (buf.copy_buf.empty()) {
// one buf for each context
buf.copy_buf.resize(src.size());
buf.compressed_recv_buf.resize(src.size());
buf.compressed_send_buf.resize(src.size());
buf.residual.resize(src.size());
for (size_t i = 0; i < src.size(); ++i) {
buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
false, buf.merged.dtype());
buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
false, buf.merged.dtype());
buf.residual[i] = 0;
int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
false, buf.merged.dtype());
buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
false, buf.merged.dtype());
}
}
for (size_t i = 0; i < src.size(); ++i) {
// compress before copy
// this is done even if the data is on the same context as copy_buf because
// we don't want the training to be biased towards data on this GPU
gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
} else {
// avoid a memory copy when they are on the same context
buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
}
gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
}
ElementwiseSum(reduce, &buf.merged);
return buf.merged;
}
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
if (!inited_) {
// copy to one device first (chosen deterministically by key)
int dev_id = key % dst.size();
CopyFromTo(src, dst[dev_id], priority);
for (size_t i = 0; i < dst.size(); ++i) {
if (i != static_cast<size_t>(dev_id)) {
CopyFromTo(*dst[dev_id], dst[i], priority);
}
}
} else {
auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf_merged, priority);
for (auto d : dst) {
CopyFromTo(buf_merged, d, priority);
}
}
}
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
for (size_t i = 0; i < dst.size(); ++i) {
NDArray* out = dst[i].first;
NDArray row_id = dst[i].second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx(), src.ctx())
<< "row_id and src are expected to be on the same context";
// retain according to indices
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
out->dtype(), out->aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
" refers to the same NDArray as the one stored in KVStore. "
"Performing row_sparse_pull() with such an output is going to change the "
"data stored in KVStore. An incorrect result may be generated the "
"next time row_sparse_pull() is called. To avoid this issue, "
"consider creating a new NDArray buffer to store the output.");
}
bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask;
Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
using namespace mxnet::common;
NDArray temp = retained_gpu;
switch (temp.ctx().dev_mask()) {
case cpu::kDevMask: {
SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo, &temp);
break;
}
#if MXNET_USE_CUDA
case gpu::kDevMask: {
SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
src, indices, kWriteTo, &temp);
// wait for GPU operations to complete
rctx.get_stream<gpu>()->Wait();
break;
}
#endif
default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
}
on_complete();
}, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()},
is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized,
priority, "KVStoreSparseRetain");
CopyFromTo(retained_gpu, out, priority);
}
}
using KeyAttrs = std::tuple<int, TShape, int>;
// try to allocate merge buffers across devices evenly: assign the largest
// keys first, each to the currently least-loaded device
void InitMergeBuffer(const std::vector<Context>& devs) {
std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
const KeyAttrs& a, const KeyAttrs& b) {
return std::get<1>(a).Size() > std::get<1>(b).Size();
});
std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
for (auto d : devs) {
ctx_info[d.dev_id] = std::make_pair(d, 0);
}
for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
const int key = std::get<0>(sorted_key_attrs_[i]);
const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
const int type = std::get<2>(sorted_key_attrs_[i]);
auto& buf = merge_buf_[key];
Context ctx;
size_t min_size = std::numeric_limits<size_t>::max();
for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
size_t size = it->second.second;
if (size <= min_size) {
ctx = it->second.first;
min_size = size;
}
}
// Delayed allocation - as the dense merged buffer might not be used at all if push()
// only sees sparse arrays
bool delay_alloc = true;
buf.merged = NDArray(shape, ctx, delay_alloc, type);
ctx_info[ctx.dev_id].second += shape.Size();
}
inited_ = true;
}
private:
void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
std::vector<int> gpus;
for (const auto& d : devs) {
if (d.dev_mask() == gpu::kDevMask) {
gpus.push_back(d.dev_id);
}
}
int n = static_cast<int>(gpus.size());
int enabled = 0;
std::vector<int> p2p(n*n);
for (int i = 0; i < n; ++i) {
cudaSetDevice(gpus[i]);
for (int j = 0; j < n; j++) {
int access;
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
if (access) {
cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
++enabled;
p2p[i*n+j] = 1;
}
}
}
}
if (enabled != n*(n-1)) {
// print a warning if peer access is not fully enabled
LOG(WARNING) << "only " << enabled << " out of "
<< n*(n-1) << " GPU pairs have direct access enabled. "
<< "This may affect performance. "
<< "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
std::string access(n, '.');
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
access[j] = p2p[i*n+j] ? 'v' : '.';
}
LOG(WARNING) << access;
}
}
#endif
}
/// \brief temporary space for pushing and pulling
struct BufferEntry {
/// \brief the dense merged value for reduce and broadcast operations
NDArray merged;
/// \brief the gpu buffer for copy during reduce operation
std::vector<NDArray> copy_buf;
/// \brief the residual buffer for gradient compression
std::vector<NDArray> residual;
/// \brief the small buffer for compressed data in sender
std::vector<NDArray> compressed_send_buf;
/// \brief the small buffer for compressed data in receiver
std::vector<NDArray> compressed_recv_buf;
/// \brief the merged buffer for the given storage type (could be either dense or row_sparse)
inline NDArray& merged_buf(NDArrayStorageType stype) {
if (stype == kDefaultStorage) {
CHECK(!merged.is_none()) << "uninitialized merge buffer detected";
return merged;
}
CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
// check if sparse_merged is initialized
if (sparse_merged.is_none()) {
CHECK(!merged.is_none());
sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
true, merged.dtype());
}
return sparse_merged;
}
private:
/// \brief the sparse merged value for reduce and rowsparse broadcast operations
NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;
public:
bool inited_;
std::vector<KeyAttrs> sorted_key_attrs_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
pooling_layer.h | //Tencent is pleased to support the open source community by making FeatherCNN available.
//Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
//in compliance with the License. You may obtain a copy of the License at
//
//https://opensource.org/licenses/BSD-3-Clause
//
//Unless required by applicable law or agreed to in writing, software distributed
//under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//CONDITIONS OF ANY KIND, either express or implied. See the License for the
//specific language governing permissions and limitations under the License.
#pragma once
#include "../feather_generated.h"
#include "../layer.h"
#include <math.h>
#include <limits>
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
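// Note: like any function-style macro, MAX/MIN evaluate their arguments
// more than once, so avoid passing expressions with side effects.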
namespace feather
{
inline void ave_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
float total = 0.0;
for (size_t m = 0; m != kernel_h; ++m)
{
for (size_t n = 0; n != kernel_w; ++n)
{
size_t pos = m * ldin + n;
total += in[pos];
}
}
*out = total / kernel_h / kernel_w;
}
inline void max_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
float max = -std::numeric_limits<float>::max(); // lowest float, so all-negative windows pool correctly
for (size_t m = 0; m != kernel_h; ++m)
{
for (size_t n = 0; n != kernel_w; ++n)
{
size_t pos = m * ldin + n;
max = (in[pos] > max) ? in[pos] : max;
}
}
*out = max;
}
class PoolingLayer : public Layer<float>
{
public:
PoolingLayer(const LayerParameter *layer_param, RuntimeParameter<float>* rt_param)
: Layer<float>(layer_param, rt_param),
stride_height(1),
stride_width(1)
{
const PoolingParameter *pooling_param = layer_param->pooling_param();
kernel_height = pooling_param->kernel_h();
kernel_width = pooling_param->kernel_w();
pad_height = pooling_param->pad_h();
pad_width = pooling_param->pad_w();
stride_height = pooling_param->stride_h();
stride_width = pooling_param->stride_w();
stride_height = (stride_height <= 0) ? 1 : stride_height;
stride_width = (stride_width <= 0) ? 1 : stride_width;
global_pooling = pooling_param->global_pooling();
this->method = pooling_param->pool();
switch (this->method)
{
case PoolingParameter_::PoolMethod_MAX_:
_pool_inner_kernel = max_pool_inner_kernel;
break;
case PoolingParameter_::PoolMethod_AVE:
_pool_inner_kernel = ave_pool_inner_kernel;
break;
default:
fprintf(stderr, "Unsupported pool method\n");
}
//printf("kernel (%ld %ld) pad (%ld %ld) stride (%ld %ld) global_pooling %d\n",
// kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, global_pooling);
}
int Forward()
{
fprintf(stderr, "Pooling layer %s\ninput shape %ld %ld %ld kernel shape %ld %ld stride %ld %ld\n", this->name().c_str(), input_channels, input_height, input_width, kernel_height, kernel_width, stride_height, stride_width);
fprintf(stderr, "output (%d %d)\n", output_height, output_width);
const float *input = _bottom_blobs[_bottom[0]]->data();
float *output = _top_blobs[_top[0]]->data();
#pragma omp parallel for schedule(static) num_threads(num_threads)
for (int i = 0; i < input_channels; ++i)
{
for (int j = 0; j < output_height; j ++)
{
float *p = output + i * output_height * output_width + j * output_width;
for (int l = 0; l < output_width; l++) p[l] = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -1 * std::numeric_limits<float>::max()) ;
int tmp_pos = j * (int)stride_height - (int)pad_height;
int x_min = MAX(tmp_pos, 0);
int x_max = MIN((int)(tmp_pos + kernel_height), (int) input_height);
for (int k = 0; k < output_width; k ++)
{
int counter = 0;
float total = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -1 * std::numeric_limits<float>::max());
for (int x = x_min; x < x_max; ++x)
{
int xpos = i * input_height * input_width + x * input_width;
int local_pos = k * (int)stride_width - (int)pad_width;
int y_min = MAX(local_pos, 0);
int y_max = MIN((int)(local_pos + kernel_width), (int) input_width);
for (int y = y_min; y < y_max; ++y)
{
float value = input[xpos + y];
if (this->method != PoolingParameter_::PoolMethod_MAX_) total += value, counter++;
else total = total > value ? total : value;
}
}
if (this->method != PoolingParameter_::PoolMethod_MAX_)
p[k] += total / (counter);
else p[k] = (p[k] > total) ? p[k] : total;
}
}
}
return 0;
}
int ForwardReshape()
{
const Blob<float> *bottom_blob = _bottom_blobs[_bottom[0]];
input_height = bottom_blob->height();
input_width = bottom_blob->width();
input_channels = bottom_blob->channels();
//printf("layer %s\n", _name.c_str());
//printf("input %lu %lu %lu\n", input_channels, input_height, input_width);
if (global_pooling)
{
kernel_height = input_height;
kernel_width = input_width;
output_height = 1;
output_width = 1;
output_channels = input_channels;
}
else
{
//General pooling.
output_channels = input_channels;
output_height = static_cast<int>(ceil(static_cast<float>(input_height + 2 * pad_height - kernel_height) / stride_height)) + 1;
output_width = static_cast<int>(ceil(static_cast<float>(input_width + 2 * pad_width - kernel_width) / stride_width)) + 1;
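// Worked example (hypothetical sizes): input_height = 224, pad_height = 0,
// kernel_height = 3, stride_height = 2 gives
// ceil((224 + 0 - 3) / 2.0) + 1 = ceil(110.5) + 1 = 112 output rows.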
}
_top_blobs[_top[0]]->ReshapeWithRealloc(1, output_channels, output_height, output_width);
return Forward();
}
int GenerateTopBlobs()
{
//Only accept a single bottom blob.
const Blob<float> *bottom_blob = _bottom_blobs[_bottom[0]];
input_height = bottom_blob->height();
input_width = bottom_blob->width();
input_channels = bottom_blob->channels();
//printf("layer %s\n", _name.c_str());
//printf("input %lu %lu %lu\n", input_channels, input_height, input_width);
if (global_pooling)
{
kernel_height = input_height;
kernel_width = input_width;
output_height = 1;
output_width = 1;
output_channels = input_channels;
}
else
{
//General pooling.
output_channels = input_channels;
output_height = static_cast<int>(ceil(static_cast<float>(input_height + 2 * pad_height - kernel_height) / stride_height)) + 1;
output_width = static_cast<int>(ceil(static_cast<float>(input_width + 2 * pad_width - kernel_width) / stride_width)) + 1;
}
_top_blobs[_top[0]] = new Blob<float>(1, output_channels, output_height, output_width);
_top_blobs[_top[0]]->Alloc();
//_top_blobs[_top[0]]->PrintBlobInfo();
return 0;
}
private:
size_t input_height;
size_t input_width;
size_t input_channels;
size_t output_height;
size_t output_width;
size_t output_channels;
size_t pad_height;
size_t pad_width;
size_t kernel_height;
size_t kernel_width;
size_t stride_height;
size_t stride_width;
bool global_pooling;
PoolingParameter_::PoolMethod method;
void (*_pool_inner_kernel)(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w);
};
} // namespace feather
|
libimagequant.c | /*
** © 2009-2016 by Kornel Lesiński.
**
** This file is part of libimagequant.
**
** libimagequant is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** libimagequant is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with libimagequant. If not, see <http://www.gnu.org/licenses/>.
*/
/* Copyright (C) 1989, 1991 by Jef Poskanzer.
** Copyright (C) 1997, 2000, 2002 by Greg Roelofs; based on an idea by
** Stefan Schneider.
**
** Permission to use, copy, modify, and distribute this software and its
** documentation for any purpose and without fee is hereby granted, provided
** that the above copyright notice appear in all copies and that both that
** copyright notice and this permission notice appear in supporting
** documentation. This software is provided "as is" without express or
** implied warranty.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800)
#error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher."
#error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version."
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#include "nearest.h"
#include "blur.h"
#include "kmeans.h"
#define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */
// each structure has a pointer as a unique identifier that allows type checking at run time
static const char liq_attr_magic[] = "liq_attr";
static const char liq_image_magic[] = "liq_image";
static const char liq_result_magic[] = "liq_result";
static const char liq_histogram_magic[] = "liq_histogram";
static const char liq_remapping_result_magic[] = "liq_remapping_result";
static const char liq_freed_magic[] = "free";
#define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic)
#define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr)
struct liq_attr {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
double target_mse, max_mse, kmeans_iteration_limit;
float min_opaque_val;
unsigned int max_colors, max_histogram_entries;
unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */;
unsigned int kmeans_iterations, feedback_loop_trials;
bool last_index_transparent, use_contrast_maps, use_dither_map;
unsigned char speed;
unsigned char progress_stage1, progress_stage2, progress_stage3;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_log_callback_function *log_callback;
void *log_callback_user_info;
liq_log_flush_callback_function *log_flush_callback;
void *log_flush_callback_user_info;
};
struct liq_image {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
f_pixel *f_pixels;
rgba_pixel **rows;
double gamma;
unsigned int width, height;
unsigned char *noise, *edges, *dither_map;
rgba_pixel *pixels, *temp_row;
f_pixel *temp_f_row;
liq_image_get_rgba_row_callback *row_callback;
void *row_callback_user_info;
float min_opaque_val;
f_pixel fixed_colors[256];
unsigned short fixed_colors_count;
bool free_pixels, free_rows, free_rows_internal;
};
typedef struct liq_remapping_result {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
unsigned char *pixels;
colormap *palette;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_palette int_palette;
double gamma, palette_error;
float dither_level;
bool use_dither_map;
unsigned char progress_stage1;
} liq_remapping_result;
struct liq_result {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
liq_remapping_result *remapping;
colormap *palette;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_palette int_palette;
float dither_level;
double gamma, palette_error;
int min_posterization_output;
bool use_dither_map;
};
struct liq_histogram {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
struct acolorhash_table *acht;
double gamma;
f_pixel fixed_colors[256];
unsigned short fixed_colors_count;
unsigned short ignorebits;
bool had_image_added;
};
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL;
static void contrast_maps(liq_image *image) LIQ_NONNULL;
static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL;
static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL;
static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL;
static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL;
LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...)
{
if (context->log_callback) {
va_list va;
va_start(va, fmt);
int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
va_end(va);
#if defined(_MSC_VER)
char *buf = malloc(required_space);
#else
char buf[required_space];
#endif
va_start(va, fmt);
vsnprintf(buf, required_space, fmt, va);
va_end(va);
context->log_callback(context, buf, context->log_callback_user_info);
#if defined(_MSC_VER)
free(buf);
#endif
}
}
LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg)
{
if (attr->log_callback) {
attr->log_callback(attr, msg, attr->log_callback_user_info);
}
}
LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr)
{
if (attr->log_flush_callback) {
attr->log_flush_callback(attr, attr->log_flush_callback_user_info);
}
}
LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent)
{
return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info);
}
LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent)
{
return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info);
}
#if USE_SSE
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64))
return true;
#elif _MSC_VER
int info[4];
__cpuid(info, 1);
/* bool is implemented as a built-in type of size 1 in MSVC */
return info[3] & (1<<26) ? true : false;
#else
int a,b,c,d;
cpuid(1, a, b, c, d);
return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif
/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
if (!user_supplied_pointer) {
return false;
}
if (user_supplied_pointer->magic_header == liq_freed_magic) {
fprintf(stderr, "%s used after being freed", expected_magic_header);
// this is not normal error handling; it is a programmer error that should crash the program.
// the program cannot safely continue if memory has been used after it's been freed.
// abort() is nasty, but a security vulnerability may be worse.
abort();
}
return user_supplied_pointer->magic_header == expected_magic_header;
}
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer)
{
if (!pointer) {
return false;
}
// Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not.
// It doesn't matter what value is read; the code only reads it so the compiler doesn't warn about an unused result.
char test_access = *((volatile char *)pointer);
return test_access || true;
}
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
liq_verbose_printf(attr, " error: %s", msg);
}
static double quality_to_mse(long quality)
{
if (quality == 0) {
return MAX_DIFF;
}
if (quality == 100) {
return 0;
}
// curve fudged to be roughly similar to quality of libjpeg
// except lowest 10 for really low number of colors
const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}
static unsigned int mse_to_quality(double mse)
{
for(int i=100; i > 0; i--) {
if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors
return i;
}
}
return 0;
}
/** internally MSE is a sum over all channels with pixels in the 0..1 range,
but other software reports per-RGB-channel MSE for the 0..255 range */
static double mse_to_standard_mse(double mse) {
return mse * 65536.0/6.0;
}
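/* e.g. an internal MSE of 0.006 maps to 0.006 * 65536 / 6, i.e. about 65.5,
   on the conventional per-channel 0..255 scale */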
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE;
attr->target_mse = quality_to_mse(target);
attr->max_mse = quality_to_mse(minimum);
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return mse_to_quality(attr->max_mse);
}
LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return mse_to_quality(attr->target_mse);
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE;
attr->max_colors = colors;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->max_colors;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE;
attr->min_posterization_output = bits;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->min_posterization_output;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;
unsigned int iterations = MAX(8-speed, 0); iterations += iterations * iterations/2;
attr->kmeans_iterations = iterations;
attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed));
attr->feedback_loop_trials = MAX(56-9*speed, 0);
attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
attr->min_posterization_input = (speed >= 8) ? 1 : 0;
attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping
attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
attr->speed = speed;
attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8;
if (attr->feedback_loop_trials < 2) attr->progress_stage1 += 30;
attr->progress_stage3 = 50 / (1+speed);
attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->speed;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
if (res->remapping) {
liq_remapping_result_destroy(res->remapping);
res->remapping = NULL;
}
res->gamma = gamma;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE;
attr->min_opaque_val = (double)min/255.0;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return MIN(255, 256.0 * attr->min_opaque_val);
}
LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->last_index_transparent = !!is_last;
}
LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->progress_callback = callback;
attr->progress_callback_user_info = user_info;
}
LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return;
result->progress_callback = callback;
result->progress_callback_user_info = user_info;
}
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
liq_verbose_printf_flush(attr);
attr->log_callback = callback;
attr->log_callback_user_info = user_info;
}
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->log_flush_callback = callback;
attr->log_flush_callback_user_info = user_info;
}
LIQ_EXPORT liq_attr* liq_attr_create()
{
return liq_attr_create_with_allocator(NULL, NULL);
}
LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return;
}
liq_verbose_printf_flush(attr);
attr->magic_header = liq_freed_magic;
attr->free(attr);
}
LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
return NULL;
}
liq_attr *attr = orig->malloc(sizeof(liq_attr));
if (!attr) return NULL;
*attr = *orig;
return attr;
}
static void *liq_aligned_malloc(size_t size)
{
unsigned char *ptr = malloc(size + 16);
if (!ptr) {
return NULL;
}
uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
ptr += offset;
assert(0 == (((uintptr_t)ptr) & 15));
ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
return ptr;
}
LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
unsigned char *ptr = inptr;
size_t offset = ptr[-1] ^ 0x59;
assert(offset > 0 && offset <= 16);
free(ptr - offset);
}
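/* Layout of the aligned allocation above, for an alignment offset in 1..16:
**
**   [ raw malloc block ..................................... ]
**   [ offset-1 padding bytes ][ offset^0x59 ][ 16-aligned data ]
**                                            ^-- pointer handed to the caller
**
** The XOR with 0x59 is presumably a cheap sanity pattern: liq_aligned_free()
** asserts the decoded offset is in (0, 16], which catches pointers that were
** not produced by liq_aligned_malloc().
*/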
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
if (!is_sse_available()) {
return NULL;
}
#endif
if (!custom_malloc && !custom_free) {
custom_malloc = liq_aligned_malloc;
custom_free = liq_aligned_free;
} else if (!custom_malloc != !custom_free) {
return NULL; // either specify both or none
}
liq_attr *attr = custom_malloc(sizeof(liq_attr));
if (!attr) return NULL;
*attr = (liq_attr) {
.magic_header = liq_attr_magic,
.malloc = custom_malloc,
.free = custom_free,
.max_colors = 256,
.min_opaque_val = 1, // whether to preserve opaque colors for IE (1.0 = no; does not affect alpha)
.last_index_transparent = false, // puts the transparent color at the last index. This is a workaround for Blu-ray subtitles.
.target_mse = 0,
.max_mse = MAX_DIFF,
};
liq_set_speed(attr, 3);
return attr;
}
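/* A minimal tuning sketch using the setters above (error handling omitted;
** the values are arbitrary examples):
**
**     liq_attr *attr = liq_attr_create();
**     liq_set_max_colors(attr, 128);
**     liq_set_quality(attr, 60, 90); // minimum quality 60, target 90
**     liq_set_speed(attr, 5);
**     ...
**     liq_attr_destroy(attr);
*/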
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){
.r = color.r,
.g = color.g,
.b = color.b,
.a = color.a,
});
return LIQ_OK;
}
LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, f_pixel color)
{
if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
hist->fixed_colors[hist->fixed_colors_count++] = color;
return LIQ_OK;
}
LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img)
{
img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * img->width * omp_get_max_threads());
return img->temp_f_row != NULL;
}
LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
return img->width * img->height > (low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow
}
static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
if (gamma < 0 || gamma > 1.0) {
liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
return NULL;
}
if (!rows && !row_callback) {
liq_log_error(attr, "missing row data");
return NULL;
}
liq_image *img = attr->malloc(sizeof(liq_image));
if (!img) return NULL;
*img = (liq_image){
.magic_header = liq_image_magic,
.malloc = attr->malloc,
.free = attr->free,
.width = width, .height = height,
.gamma = gamma ? gamma : 0.45455,
.rows = rows,
.row_callback = row_callback,
.row_callback_user_info = row_callback_user_info,
.min_opaque_val = attr->min_opaque_val,
};
if (!rows || attr->min_opaque_val < 1.f) {
img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * width * omp_get_max_threads());
if (!img->temp_row) return NULL;
}
// if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
verbose_print(attr, " conserving memory");
if (!liq_image_use_low_memory(img)) return NULL;
}
if (img->min_opaque_val < 1.f) {
verbose_print(attr, " Working around IE6 bug by making image less transparent...");
}
return img;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
return LIQ_VALUE_OUT_OF_RANGE;
}
if (ownership_flags & LIQ_OWN_ROWS) {
if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
img->free_rows = true;
}
if (ownership_flags & LIQ_OWN_PIXELS) {
img->free_pixels = true;
if (!img->pixels) {
// for simplicity of this API there's no explicit bitmap argument,
// so the row with the lowest address is assumed to be at the start of the bitmap
img->pixels = img->rows[0];
for(unsigned int i=1; i < img->height; i++) {
img->pixels = MIN(img->pixels, img->rows[i]);
}
}
}
return LIQ_OK;
}
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return false;
}
if (width <= 0 || height <= 0) {
liq_log_error(attr, "width and height must be > 0");
return false;
}
if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) {
liq_log_error(attr, "image too large");
return false;
}
return true;
}
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
}
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
for(int i=0; i < height; i++) {
if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) {
liq_log_error(attr, "invalid row pointers");
return NULL;
}
}
return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
if (!CHECK_USER_POINTER(bitmap)) {
liq_log_error(attr, "invalid bitmap pointer");
return NULL;
}
rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
if (!rows) return NULL;
for(int i=0; i < height; i++) {
rows[i] = pixels + width * i;
}
liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
if (!image) {
attr->free(rows);
return NULL;
}
image->free_rows = true;
image->free_rows_internal = true;
return image;
}
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
assert(callback);
assert(temp_row);
callback(temp_row, row, width, user_info);
}
LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) {
return false;
}
return img->rows || (img->temp_row && img->row_callback);
}
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
assert(liq_image_has_rgba_pixels(img));
const bool iebug = img->min_opaque_val < 1.f;
return (img->rows && !iebug);
}
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
if (liq_image_can_use_rgba_rows(img)) {
return img->rows[row];
}
assert(img->temp_row);
rgba_pixel *temp_row = img->temp_row + img->width * omp_get_thread_num();
if (img->rows) {
memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
} else {
liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
}
if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row);
return temp_row;
}
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
assert(row_f_pixels);
#ifndef _MSC_VER
assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));
#endif
const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row);
for(unsigned int col=0; col < img->width; col++) {
row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]);
}
}
LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
if (!img->f_pixels) {
if (img->temp_f_row) {
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
f_pixel *row_for_thread = img->temp_f_row + img->width * omp_get_thread_num();
convert_row_to_f(img, row_for_thread, row, gamma_lut);
return row_for_thread;
}
assert(omp_get_thread_num() == 0);
if (!liq_image_should_use_low_memory(img, false)) {
img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
}
if (!img->f_pixels) {
if (!liq_image_use_low_memory(img)) return NULL;
return liq_image_get_row_f(img, row);
}
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
for(unsigned int i=0; i < img->height; i++) {
convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut);
}
}
return img->f_pixels + img->width * row;
}
LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
return input_image->width;
}
LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
return input_image->height;
}
typedef void free_func(void*);
LIQ_NONNULL static free_func *get_default_free_func(liq_image *img)
{
// When the default allocator is used, user-supplied pointers must be freed with free()
if (img->free_rows_internal || img->free != liq_aligned_free) {
return img->free;
}
return free;
}
LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image)
{
if (input_image->free_pixels && input_image->pixels) {
get_default_free_func(input_image)(input_image->pixels);
input_image->pixels = NULL;
}
if (input_image->free_rows && input_image->rows) {
get_default_free_func(input_image)(input_image->rows);
input_image->rows = NULL;
}
}
LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
liq_image_free_rgba_source(input_image);
if (input_image->noise) {
input_image->free(input_image->noise);
}
if (input_image->edges) {
input_image->free(input_image->edges);
}
if (input_image->dither_map) {
input_image->free(input_image->dither_map);
}
if (input_image->f_pixels) {
input_image->free(input_image->f_pixels);
}
if (input_image->temp_row) {
input_image->free(input_image->temp_row);
}
if (input_image->temp_f_row) {
input_image->free(input_image->temp_f_row);
}
input_image->magic_header = liq_freed_magic;
input_image->free(input_image);
}
LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return NULL;
}
liq_histogram *hist = attr->malloc(sizeof(liq_histogram));
if (!hist) return NULL;
*hist = (liq_histogram) {
.magic_header = liq_histogram_magic,
.malloc = attr->malloc,
.free = attr->free,
.ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input),
};
return hist;
}
LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist)
{
if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return;
hist->magic_header = liq_freed_magic;
pam_freeacolorhash(hist->acht);
hist->free(hist);
}
LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
liq_result *res;
if (LIQ_OK != liq_image_quantize(img, attr, &res)) {
return NULL;
}
return res;
}
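/* End-to-end usage sketch (hypothetical `bitmap` holding width*height RGBA
** pixels; error handling omitted; gamma 0 selects the default):
**
**     liq_attr *attr = liq_attr_create();
**     liq_image *img = liq_image_create_rgba(attr, bitmap, width, height, 0);
**     liq_result *res = liq_quantize_image(attr, img);
**     ...
**     liq_result_destroy(res);
**     liq_image_destroy(img);
**     liq_attr_destroy(attr);
*/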
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (!liq_image_has_rgba_pixels(img)) {
return LIQ_UNSUPPORTED;
}
liq_histogram *hist = liq_histogram_create(attr);
if (!hist) {
return LIQ_OUT_OF_MEMORY;
}
liq_error err = liq_histogram_add_image(hist, attr, img);
if (LIQ_OK != err) {
return err;
}
err = liq_histogram_quantize_internal(hist, attr, false, result_output);
liq_histogram_destroy(hist);
return err;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) {
return liq_histogram_quantize_internal(input_hist, attr, true, result_output);
}
LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output)
{
if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER;
*result_output = NULL;
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (liq_progress(attr, 0)) return LIQ_ABORTED;
histogram *hist;
liq_error err = finalize_histogram(input_hist, attr, &hist);
if (err != LIQ_OK) {
return err;
}
err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output);
pam_freeacolorhist(hist);
return err;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
if (res->remapping) {
liq_remapping_result_destroy(res->remapping);
res->remapping = NULL;
}
if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE;
res->dither_level = dither_level;
return LIQ_OK;
}
LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) {
return NULL;
}
liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
if (!res) return NULL;
*res = (liq_remapping_result) {
.magic_header = liq_remapping_result_magic,
.malloc = result->malloc,
.free = result->free,
.dither_level = result->dither_level,
.use_dither_map = result->use_dither_map,
.palette_error = result->palette_error,
.gamma = result->gamma,
.palette = pam_duplicate_colormap(result->palette),
.progress_callback = result->progress_callback,
.progress_callback_user_info = result->progress_callback_user_info,
.progress_stage1 = result->use_dither_map ? 20 : 0,
};
return res;
}
LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
return result->gamma;
}
LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
if (result->palette) pam_freecolormap(result->palette);
if (result->pixels) result->free(result->pixels);
result->magic_header = liq_freed_magic;
result->free(result);
}
LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
memset(&res->int_palette, 0, sizeof(liq_palette));
if (res->remapping) {
memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
liq_remapping_result_destroy(res->remapping);
}
pam_freecolormap(res->palette);
res->magic_header = liq_freed_magic;
res->free(res);
}
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->palette_error >= 0) {
return mse_to_standard_mse(result->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->remapping && result->remapping->palette_error >= 0) {
return mse_to_standard_mse(result->remapping->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->palette_error >= 0) {
return mse_to_quality(result->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->remapping && result->remapping->palette_error >= 0) {
return mse_to_quality(result->remapping->palette_error);
}
return -1;
}
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
const float v1 = ((const colormap_item*)ch1)->popularity;
const float v2 = ((const colormap_item*)ch2)->popularity;
return v1 > v2 ? -1 : (v1 < v2 ? 1 : 0); // descending; returning 0 for ties keeps the comparator consistent for qsort
}
LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
if (!nelem) return;
qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}
#define SWAP_PALETTE(map, a,b) { \
const colormap_item tmp = (map)->palette[(a)]; \
(map)->palette[(a)] = (map)->palette[(b)]; \
(map)->palette[(b)] = tmp; }
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
/*
** Step 3.5 [GRR]: remap the palette colors so that all entries with
** the maximal alpha value (i.e., fully opaque) are at the end and can
** therefore be omitted from the tRNS chunk.
*/
if (options->last_index_transparent) {
for(unsigned int i=0; i < map->colors; i++) {
if (map->palette[i].acolor.a < 1.0/256.0) {
const unsigned int old = i, transparent_dest = map->colors-1;
SWAP_PALETTE(map, transparent_dest, old);
/* colors sorted by popularity make pngs slightly more compressible */
sort_palette_qsort(map, 0, map->colors-1);
return;
}
}
}
unsigned int non_fixed_colors = 0;
for(unsigned int i = 0; i < map->colors; i++) {
if (map->palette[i].fixed) {
break;
}
non_fixed_colors++;
}
/* move transparent colors to the beginning to shrink trns chunk */
unsigned int num_transparent = 0;
for(unsigned int i = 0; i < non_fixed_colors; i++) {
if (map->palette[i].acolor.a < 255.0/256.0) {
// current transparent color is swapped with earlier opaque one
if (i != num_transparent) {
SWAP_PALETTE(map, num_transparent, i);
i--;
}
num_transparent++;
}
}
liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");
/* colors sorted by popularity make pngs slightly more compressible
* opaque and transparent are sorted separately
*/
sort_palette_qsort(map, 0, num_transparent);
sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);
if (non_fixed_colors > 9 && map->colors > 16) {
SWAP_PALETTE(map, 7, 1); // slightly improves compression
SWAP_PALETTE(map, 8, 2);
SWAP_PALETTE(map, 9, 3);
}
}
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}
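/* Worked example (values chosen for illustration): posterize_channel(0xB7, 2)
 * clears the low 2 bits (0xB7 & ~3 == 0xB4) and replicates the top 2 bits into
 * them (0xB7 >> 6 == 0x02), giving 0xB6; 0x00 stays 0x00 and 0xFF stays 0xFF,
 * so the full range is preserved. */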
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
float gamma_lut[256];
to_f_set_gamma(gamma_lut, gamma);
dest->count = map->colors;
for(unsigned int x = 0; x < map->colors; ++x) {
rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);
px.r = posterize_channel(px.r, posterize);
px.g = posterize_channel(px.g, posterize);
px.b = posterize_channel(px.b, posterize);
px.a = posterize_channel(px.a, posterize);
map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */
if (!px.a && !map->palette[x].fixed) {
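/* the RGB of a fully transparent entry is never visible; presumably a fixed
marker color is used here so the output stays deterministic */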
px.r = 71; px.g = 112; px.b = 76;
}
dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
}
}
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;
if (result->remapping && result->remapping->int_palette.count) {
return &result->remapping->int_palette;
}
if (!result->int_palette.count) {
set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
}
return &result->int_palette;
}
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
const int rows = input_image->height;
const unsigned int cols = input_image->width;
double remapping_error=0;
if (!liq_image_get_row_f(input_image, 0)) { // trigger lazy conversion
return -1;
}
struct nearest_map *const n = nearest_init(map);
const unsigned int max_threads = omp_get_max_threads();
kmeans_state *average_color = malloc((KMEANS_CACHE_LINE_GAP+map->colors) * max_threads * sizeof(kmeans_state));
if (!average_color) {
nearest_free(n);
return -1;
}
kmeans_init(map, max_threads, average_color);
int row;
#pragma omp parallel for if (rows*cols > 3000) \
schedule(static) default(none) shared(average_color) reduction(+:remapping_error)
for(row = 0; row < rows; ++row) {
const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
unsigned int last_match=0;
for(unsigned int col = 0; col < cols; ++col) {
float diff;
output_pixels[row][col] = last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
remapping_error += diff;
kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
}
}
kmeans_finalize(map, max_threads, average_color);
nearest_free(n);
free(average_color);
return remapping_error / (input_image->width * input_image->height);
}
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
/* Use Floyd-Steinberg errors to adjust actual color. */
const float sr = thiserr.r * dither_level,
sg = thiserr.g * dither_level,
sb = thiserr.b * dither_level,
sa = thiserr.a * dither_level;
float ratio = 1.0;
const float max_overflow = 1.1f;
const float max_underflow = -0.1f;
// allowing some overflow prevents undithered bands caused by clamping of all channels
if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr);
else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); }
if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg);
else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); }
if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb);
else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); }
float a = px.a + sa;
if (a > 1.0) { a = 1.0; }
else if (a < 0) { a = 0; }
// If dithering error is crazy high, don't propagate it that much
// This prevents crazy green pixels popping out of the blue (or red or black! ;)
const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
if (dither_error > max_dither_error) {
ratio *= 0.8;
} else if (dither_error < 2.f/256.f/256.f) {
// don't dither areas that don't have noticeable error — makes file smaller
return px;
}
return (f_pixel){
.r=px.r + sr * ratio,
.g=px.g + sg * ratio,
.b=px.b + sb * ratio,
.a=a,
};
}
/**
Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.
If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
*/
LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped)
{
const int rows = input_image->height, cols = input_image->width;
const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;
const colormap *map = quant->palette;
const colormap_item *acolormap = map->palette;
/* Initialize Floyd-Steinberg error vectors. */
const size_t errwidth = cols+2;
f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access
if (!thiserr) return false;
f_pixel *restrict nexterr = thiserr + errwidth;
memset(thiserr, 0, errwidth * sizeof(thiserr[0]));
bool ok = true;
struct nearest_map *const n = nearest_init(map);
// response to this value is non-linear and without it any value < 0.8 would give almost no dithering
float base_dithering_level = quant->dither_level;
base_dithering_level = 1.0 - (1.0-base_dithering_level)*(1.0-base_dithering_level);
if (dither_map) {
base_dithering_level *= 1.0/255.0; // convert byte to float
}
base_dithering_level *= 15.0/16.0; // prevent small errors from accumulating
int fs_direction = 1;
unsigned int last_match=0;
for (int row = 0; row < rows; ++row) {
if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) {
ok = false;
break;
}
memset(nexterr, 0, errwidth * sizeof(nexterr[0]));
int col = (fs_direction > 0) ? 0 : (cols - 1);
const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
do {
float dither_level = base_dithering_level;
if (dither_map) {
dither_level *= dither_map[row*cols + col];
}
const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);
const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
output_pixels[row][col] = last_match = nearest_search(n, &spx, guessed_match, NULL);
const f_pixel output_px = acolormap[last_match].acolor;
f_pixel err = {
.r = (spx.r - output_px.r),
.g = (spx.g - output_px.g),
.b = (spx.b - output_px.b),
.a = (spx.a - output_px.a),
};
// If dithering error is crazy high, don't propagate it that much
// This prevents crazy green pixels popping out of the blue (or red or black! ;)
if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
err.r *= 0.75;
err.g *= 0.75;
err.b *= 0.75;
err.a *= 0.75;
}
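/* Classic Floyd-Steinberg weights, which the code below implements (error
 * indices are shifted by +1 because the error rows are padded on both sides):
 *            X    7/16
 *   3/16   5/16   1/16
 * The mirrored branch applies the same weights on right-to-left (zig-zag) rows. */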
/* Propagate Floyd-Steinberg error terms. */
if (fs_direction > 0) {
thiserr[col + 2].a += err.a * (7.f/16.f);
thiserr[col + 2].r += err.r * (7.f/16.f);
thiserr[col + 2].g += err.g * (7.f/16.f);
thiserr[col + 2].b += err.b * (7.f/16.f);
nexterr[col + 2].a = err.a * (1.f/16.f);
nexterr[col + 2].r = err.r * (1.f/16.f);
nexterr[col + 2].g = err.g * (1.f/16.f);
nexterr[col + 2].b = err.b * (1.f/16.f);
nexterr[col + 1].a += err.a * (5.f/16.f);
nexterr[col + 1].r += err.r * (5.f/16.f);
nexterr[col + 1].g += err.g * (5.f/16.f);
nexterr[col + 1].b += err.b * (5.f/16.f);
nexterr[col ].a += err.a * (3.f/16.f);
nexterr[col ].r += err.r * (3.f/16.f);
nexterr[col ].g += err.g * (3.f/16.f);
nexterr[col ].b += err.b * (3.f/16.f);
} else {
thiserr[col ].a += err.a * (7.f/16.f);
thiserr[col ].r += err.r * (7.f/16.f);
thiserr[col ].g += err.g * (7.f/16.f);
thiserr[col ].b += err.b * (7.f/16.f);
nexterr[col ].a = err.a * (1.f/16.f);
nexterr[col ].r = err.r * (1.f/16.f);
nexterr[col ].g = err.g * (1.f/16.f);
nexterr[col ].b = err.b * (1.f/16.f);
nexterr[col + 1].a += err.a * (5.f/16.f);
nexterr[col + 1].r += err.r * (5.f/16.f);
nexterr[col + 1].g += err.g * (5.f/16.f);
nexterr[col + 1].b += err.b * (5.f/16.f);
nexterr[col + 2].a += err.a * (3.f/16.f);
nexterr[col + 2].r += err.r * (3.f/16.f);
nexterr[col + 2].g += err.g * (3.f/16.f);
nexterr[col + 2].b += err.b * (3.f/16.f);
}
// remapping is done in zig-zag
col += fs_direction;
if (fs_direction > 0) {
if (col >= cols) break;
} else {
if (col < 0) break;
}
} while(1);
f_pixel *const temperr = thiserr;
thiserr = nexterr;
nexterr = temperr;
fs_direction = -fs_direction;
}
input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
nearest_free(n);
return ok;
}
/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */
LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse)
{
const float max_difference = MAX(target_mse/2.0, 2.0/256.0/256.0);
if (fixed_colors_count) {
for(int j=0; j < hist->size; j++) {
for(unsigned int i=0; i < fixed_colors_count; i++) {
if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry
j--; break; // continue searching histogram
}
}
}
}
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;
if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
return LIQ_UNSUPPORTED;
}
input_hist->ignorebits = 0;
input_hist->had_image_added = true;
input_hist->gamma = gamma ? gamma : 0.45455;
if (!input_hist->acht) {
input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free);
if (!input_hist->acht) {
return LIQ_OUT_OF_MEMORY;
}
}
// Fake image size. It's only for hash size estimates.
if (!input_hist->acht->cols) {
input_hist->acht->cols = num_entries;
}
input_hist->acht->rows += num_entries;
const unsigned int hash_size = input_hist->acht->hash_size;
for(int i=0; i < num_entries; i++) {
const rgba_pixel rgba = {
.r = entries[i].color.r,
.g = entries[i].color.g,
.b = entries[i].color.b,
.a = entries[i].color.a,
};
union rgba_as_int px = {rgba};
unsigned int hash;
if (px.rgba.a) {
hash = px.l % hash_size;
} else {
hash=0; px.l=0;
}
if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) {
return LIQ_OUT_OF_MEMORY;
}
}
return LIQ_OK;
}
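/* Usage sketch, not from the original source (hypothetical weights; assumes the
 * public liq_histogram_entry layout with .color and .count fields):
 *
 *   liq_histogram_entry e[2] = {
 *       { .color = {.r=255, .g=0, .b=0, .a=255}, .count = 10 },
 *       { .color = {.r=0, .g=0, .b=255, .a=255}, .count = 3 },
 *   };
 *   liq_histogram_add_colors(hist, attr, e, 2, 0); // gamma 0 selects the 0.45455 default
 */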
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
const unsigned int cols = input_image->width, rows = input_image->height;
if (!input_image->noise && options->use_contrast_maps) {
contrast_maps(input_image);
}
input_hist->gamma = input_image->gamma;
for(int i = 0; i < input_image->fixed_colors_count; i++) {
liq_error res = liq_histogram_add_fixed_color(input_hist, input_image->fixed_colors[i]);
if (res != LIQ_OK) {
return res;
}
}
/*
** Step 2: attempt to make a histogram of the colors, unclustered.
** If at first we don't succeed, increase ignorebits to increase color
** coherence and try again.
*/
if (liq_progress(options, options->progress_stage1 * 0.4f)) return LIQ_ABORTED;
const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image);
// Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not
// the first image added
const unsigned int max_histogram_entries = input_hist->had_image_added ? ~0 : options->max_histogram_entries;
do {
if (!input_hist->acht) {
input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
}
if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;
// histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
// noise map does not include edges to avoid ruining anti-aliasing
for(unsigned int row=0; row < rows; row++) {
bool added_ok;
if (all_rows_at_once) {
added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->noise);
if (added_ok) break;
} else {
const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->noise ? &input_image->noise[row * cols] : NULL);
}
if (!added_ok) {
input_hist->ignorebits++;
liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
pam_freeacolorhash(input_hist->acht);
input_hist->acht = NULL;
if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
break;
}
}
} while(!input_hist->acht);
input_hist->had_image_added = true;
if (input_image->noise) {
input_image->free(input_image->noise);
input_image->noise = NULL;
}
if (input_image->free_pixels && input_image->f_pixels) {
liq_image_free_rgba_source(input_image); // now we can free the RGBA source, since a copy has been made in f_pixels
}
return LIQ_OK;
}
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
if (liq_progress(options, options->progress_stage1 * 0.9f)) {
return LIQ_ABORTED;
}
if (!input_hist->acht) {
return LIQ_BITMAP_NOT_AVAILABLE;
}
histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
pam_freeacolorhash(input_hist->acht);
input_hist->acht = NULL;
if (!hist) {
return LIQ_OUT_OF_MEMORY;
}
liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);
*hist_output = hist;
return LIQ_OK;
}
LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
/* IE6 renders colors with even the slightest transparency as completely transparent,
thus to improve the situation in IE, make colors that are less than ~10% transparent
completely opaque */
const float min_opaque_val = input_image->min_opaque_val;
const float almost_opaque_val = min_opaque_val * 169.f/256.f;
const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;
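/* e.g. with min_opaque_val == 1.0 this branch applies to alpha >= ~168/255 (~66%) */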
for(unsigned int col = 0; col < input_image->width; col++) {
const rgba_pixel px = row_pixels[col];
/* ie bug: to avoid a visible step caused by forced opaqueness, linearly raise opaqueness of almost-opaque colors */
if (px.a >= almost_opaque_val_int) {
float al = px.a / 255.f;
al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
al *= 256.f;
row_pixels[col].a = al >= 255.f ? 255 : al;
}
}
}
/**
Builds two maps:
noise - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
edges - noise map including all edges
*/
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
const unsigned int cols = image->width, rows = image->height;
if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
return;
}
unsigned char *restrict noise = image->noise ? image->noise : image->malloc(cols*rows);
image->noise = NULL;
unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
image->edges = NULL;
unsigned char *restrict tmp = image->malloc(cols*rows);
if (!noise || !edges || !tmp) {
image->free(noise);
image->free(edges);
image->free(tmp);
return;
}
const f_pixel *curr_row, *prev_row, *next_row;
curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);
for (unsigned int j=0; j < rows; j++) {
prev_row = curr_row;
curr_row = next_row;
next_row = liq_image_get_row_f(image, MIN(rows-1,j+1));
f_pixel prev, curr = curr_row[0], next=curr;
for (unsigned int i=0; i < cols; i++) {
prev=curr;
curr=next;
next = curr_row[MIN(cols-1,i+1)];
// contrast is difference between pixels neighbouring horizontally and vertically
const float a = fabsf(prev.a+next.a - curr.a*2.f),
r = fabsf(prev.r+next.r - curr.r*2.f),
g = fabsf(prev.g+next.g - curr.g*2.f),
b = fabsf(prev.b+next.b - curr.b*2.f);
const f_pixel prevl = prev_row[i];
const f_pixel nextl = next_row[i];
const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);
const float horiz = MAX(MAX(a,r),MAX(g,b));
const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
const float edge = MAX(horiz,vert);
float z = edge - fabsf(horiz-vert)*.5f;
z = 1.f - MAX(z,MIN(horiz,vert));
z *= z; // noise is amplified
z *= z;
z *= 256.f;
noise[j*cols+i] = z < 256 ? z : 255;
z = (1.f-edge)*256.f;
edges[j*cols+i] = z > 0 ? (z < 256 ? z : 255) : 0;
}
}
// noise areas are shrunk and then expanded to remove thin edges from the map
liq_max3(noise, tmp, cols, rows);
liq_max3(tmp, noise, cols, rows);
liq_blur(noise, tmp, noise, cols, rows, 3);
liq_max3(noise, tmp, cols, rows);
liq_min3(tmp, noise, cols, rows);
liq_min3(noise, tmp, cols, rows);
liq_min3(tmp, noise, cols, rows);
liq_min3(edges, tmp, cols, rows);
liq_max3(tmp, edges, cols, rows);
for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);
image->free(tmp);
image->noise = noise;
image->edges = edges;
}
/**
* Builds map of neighbor pixels mapped to the same palette entry
*
* For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
* and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
* Correct flood fill doesn't have visually good properties.
*/
LIQ_NONNULL static void update_dither_map(unsigned char *const *const row_pointers, liq_image *input_image)
{
const unsigned int width = input_image->width;
const unsigned int height = input_image->height;
unsigned char *const edges = input_image->edges;
for(unsigned int row=0; row < height; row++) {
unsigned char lastpixel = row_pointers[row][0];
unsigned int lastcol=0;
for(unsigned int col=1; col < width; col++) {
const unsigned char px = row_pointers[row][col];
if (px != lastpixel || col == width-1) {
int neighbor_count = 10 * (col-lastcol);
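// each pixel of the horizontal run counts as 10; matching pixels directly
// above/below add 15 each in the loop that follows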
unsigned int i=lastcol;
while(i < col) {
if (row > 0) {
unsigned char pixelabove = row_pointers[row-1][i];
if (pixelabove == lastpixel) neighbor_count += 15;
}
if (row < height-1) {
unsigned char pixelbelow = row_pointers[row+1][i];
if (pixelbelow == lastpixel) neighbor_count += 15;
}
i++;
}
while(lastcol <= col) {
int e = edges[row*width + lastcol];
edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
}
lastpixel = px;
}
}
}
input_image->dither_map = input_image->edges;
input_image->edges = NULL;
}
/**
* Palette can be NULL, in which case it creates a new palette from scratch.
*/
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
if (!fixed_colors_count) return palette;
colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);
unsigned int i=0;
if (palette && fixed_colors_count < max_colors) {
unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
for(; i < palette_max; i++) {
newpal->palette[i] = palette->palette[i];
}
}
for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
newpal->palette[i++] = (colormap_item){
.acolor = fixed_colors[j],
.fixed = true,
};
}
if (palette) pam_freecolormap(palette);
return newpal;
}
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}
/**
Repeats mediancut with different histogram weights to find palette with minimum error.
feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
unsigned int max_colors = options->max_colors;
// if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
// at this point actual gamma is not set, so very conservative posterization estimate is used
const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
int feedback_loop_trials = options->feedback_loop_trials;
colormap *acolormap = NULL;
double least_error = MAX_DIFF;
double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);
do {
colormap *newmap;
if (hist->size && fixed_colors_count < max_colors) {
newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2,
options->malloc, options->free);
} else {
feedback_loop_trials = 0;
newmap = NULL;
}
newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
if (!newmap) {
return NULL;
}
if (feedback_loop_trials <= 0) {
return newmap;
}
// after palette has been created, total error (MSE) is calculated to keep the best palette
// at the same time K-Means iteration is done to improve the palette
// and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors
const bool first_run_of_target_mse = !acolormap && target_mse > 0;
double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback);
// goal is to increase quality or to reduce number of colors used if quality is good enough
if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
if (acolormap) pam_freecolormap(acolormap);
acolormap = newmap;
if (total_error < target_mse && total_error > 0) {
// K-Means iteration improves quality above what mediancut aims for
// this compensates for it, making mediancut aim for worse
target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
}
least_error = total_error;
// if number of colors could be reduced, try to keep it that way
// but allow extra color as a bit of wiggle room in case quality can be improved too
max_colors = MIN(newmap->colors+1, max_colors);
feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
} else {
for(unsigned int j=0; j < hist->size; j++) {
hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
}
target_mse_overshoot = 1.0;
feedback_loop_trials -= 6;
// if error is really bad, it's unlikely to improve, so end sooner
if (total_error > least_error*4) feedback_loop_trials -= 3;
pam_freecolormap(newmap);
}
float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done));
}
while(feedback_loop_trials > 0);
*palette_error_p = least_error;
return acolormap;
}
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) {
if (!hist->size) {
return NULL;
}
colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
for(unsigned int i=0; i < hist->size; i++) {
acolormap->palette[i].acolor = hist->achv[i].acolor;
acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
}
return acolormap;
}
LIQ_NONNULL static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
colormap *acolormap;
double palette_error = -1;
assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));
const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;
if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;
// If image has few colors to begin with (and no quality degradation is required)
// then it's possible to skip quantization entirely
if (few_input_colors && options->target_mse == 0) {
acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
palette_error = 0;
} else {
const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
if (!acolormap) {
return LIQ_VALUE_OUT_OF_RANGE;
}
// K-Means iteration approaches local minimum for the palette
const double iteration_limit = options->kmeans_iteration_limit;
unsigned int iterations = options->kmeans_iterations;
if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work
if (iterations) {
// likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
}
}
verbose_print(options, " moving colormap towards local minimum");
double previous_palette_error = MAX_DIFF;
for(unsigned int i=0; i < iterations; i++) {
palette_error = kmeans_do_iteration(hist, acolormap, NULL);
if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
break;
}
if (fabs(previous_palette_error-palette_error) < iteration_limit) {
break;
}
if (palette_error > max_mse*1.5) { // probably hopeless
if (palette_error > max_mse*3.0) break; // definitely hopeless
i++;
}
previous_palette_error = palette_error;
}
}
if (palette_error > max_mse) {
liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
mse_to_standard_mse(palette_error), mse_to_quality(palette_error),
mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
pam_freecolormap(acolormap);
return LIQ_QUALITY_TOO_LOW;
}
}
if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
pam_freecolormap(acolormap);
return LIQ_ABORTED;
}
sort_palette(acolormap, options);
// If palette was created from a multi-image histogram,
// then it shouldn't be optimized for one image during remapping
if (fixed_result_colors) {
for(unsigned int i=0; i < acolormap->colors; i++) {
acolormap->palette[i].fixed = true;
}
}
liq_result *result = options->malloc(sizeof(liq_result));
if (!result) return LIQ_OUT_OF_MEMORY;
*result = (liq_result){
.magic_header = liq_result_magic,
.malloc = options->malloc,
.free = options->free,
.palette = acolormap,
.palette_error = palette_error,
.use_dither_map = options->use_dither_map,
.gamma = gamma,
.min_posterization_output = options->min_posterization_output,
};
*result_output = result;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) {
return LIQ_INVALID_POINTER;
}
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
return LIQ_INVALID_POINTER;
}
if (!CHECK_USER_POINTER(buffer)) {
return LIQ_INVALID_POINTER;
}
const size_t required_size = input_image->width * input_image->height;
if (buffer_size < required_size) {
return LIQ_BUFFER_TOO_SMALL;
}
unsigned char **rows = malloc(input_image->height * sizeof(unsigned char *));
if (!rows) {
return LIQ_OUT_OF_MEMORY;
}
unsigned char *buffer_bytes = buffer;
for(unsigned int i=0; i < input_image->height; i++) {
rows[i] = &buffer_bytes[input_image->width * i];
}
liq_error error = liq_write_remapped_image_rows(result, input_image, rows);
free(rows);
return error;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
for(unsigned int i=0; i < input_image->height; i++) {
if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
}
if (quant->remapping) {
liq_remapping_result_destroy(quant->remapping);
}
liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
if (!result) return LIQ_OUT_OF_MEMORY;
if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
contrast_maps(input_image);
}
if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
return LIQ_ABORTED;
}
/*
** Step 4: map the colors in the image to their closest match in the
** new colormap, and write 'em out.
*/
float remapping_error = result->palette_error;
if (result->dither_level == 0) {
set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
} else {
const bool generate_dither_map = result->use_dither_map && (input_image->edges && !input_image->dither_map);
if (generate_dither_map) {
// If dithering (with dither map) is required, this image is used to find areas that require dithering
remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
update_dither_map(row_pointers, input_image);
}
if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
return LIQ_ABORTED;
}
// remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
return LIQ_ABORTED;
}
}
// remapping error from the dithered image is absurd, so the non-dithered value is always used
// palette_error includes some perceptual weighting from the histogram, which correlates more closely with dssim,
// so that should be used when possible.
if (result->palette_error < 0) {
result->palette_error = remapping_error;
}
return LIQ_OK;
}
LIQ_EXPORT int liq_version() {
return LIQ_VERSION;
}
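/* End-to-end usage sketch for the API above (a minimal sketch; liq_attr_create,
 * liq_image_create_rgba and liq_attr_destroy are assumed from the public
 * libimagequant header, and error checks are omitted for brevity):
 *
 *   liq_attr *attr = liq_attr_create();
 *   liq_image *img = liq_image_create_rgba(attr, raw_rgba, width, height, 0);
 *   liq_result *res = liq_quantize_image(attr, img);
 *   unsigned char *idx = malloc((size_t)width * height);
 *   liq_write_remapped_image(res, img, idx, (size_t)width * height);
 *   const liq_palette *pal = liq_get_palette(res);
 *   // ...encode idx + pal into a PNG here...
 *   liq_result_destroy(res);
 *   liq_image_destroy(img);
 *   liq_attr_destroy(attr);
 */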
|
035be_so8_advgcc.c | #define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <xmmintrin.h>
#include <pmmintrin.h>
#include <omp.h>
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, float *restrict r82_vec, float *restrict r83_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, float *restrict r82_vec, float *restrict r83_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float (*r49)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r49, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float (*r50)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r50, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float (*r51)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r51, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float (*r52)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r52, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float (*r53)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r53, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float (*r82)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r82, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
float (*r83)[y_size + 2 + 2][z_size + 2 + 2];
posix_memalign((void**)&r83, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
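/* Each temporary is padded with a 2-cell halo on both sides ("+ 2 + 2") so the
 * radius-2 stencils in bf0/bf1 can read neighbours without bounds checks.
 * (Note: the posix_memalign return values are not checked here.) */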
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(static,1)
for (int x = x_m - 2; x <= x_M + 2; x += 1)
{
for (int y = y_m - 2; y <= y_M + 2; y += 1)
{
#pragma omp simd aligned(delta,phi,theta:32)
for (int z = z_m - 2; z <= z_M + 2; z += 1)
{
r49[x + 2][y + 2][z + 2] = sqrt(2*delta[x + 8][y + 8][z + 8] + 1);
r50[x + 2][y + 2][z + 2] = cos(theta[x + 8][y + 8][z + 8]);
r51[x + 2][y + 2][z + 2] = cos(phi[x + 8][y + 8][z + 8]);
r52[x + 2][y + 2][z + 2] = sin(theta[x + 8][y + 8][z + 8]);
r53[x + 2][y + 2][z + 2] = sin(phi[x + 8][y + 8][z + 8]);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 4;
int t_blk_size = 2 * sf * (time_M - time_m);
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
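/* Time-tiled wavefront sweep: u and v keep three time planes that rotate via
 * t0/t1/t2 (modulo-3 indexing of `time`), and the x/y bounds inside bf0/bf1
 * are skewed by `time` so each tile advances diagonally in (time, x, y). */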
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m-2 ; xb <= (x_M + 2 + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m-2 ; yb <= (y_M+2 + sf * (time_M - time_m)); yb += yb_size)
{
//printf(" Timestep tw: %d, Updating x: %d y: %d \n", xb, yb);
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0((float *)r50, (float *)r51, (float *)r52, (float *)r53, (float *)r82, (float *)r83, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M + 2, x_m-2, y0_blk0_size, y_M+2, y_m-2, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
//printf("\n BF0 - 1 IS OVER");
/*==============================================*/
bf1(damp_vec, dt, epsilon_vec, (float *)r49, (float *)r50, (float *)r51, (float *)r52, (float *)r53, (float *)r82, (float *)r83, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x0_blk0_size, x_M, x_m, y0_blk0_size, y_M, y_m , z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
//printf("\n BF1 - 1 IS OVER");
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
free(r53);
free(r52);
free(r51);
free(r50);
free(r49);
free(r82);
free(r83);
return 0;
}
void bf0(float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, float *restrict r82_vec, float *restrict r83_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
float (*restrict r50)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r50_vec;
float (*restrict r51)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r51_vec;
float (*restrict r52)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r52_vec;
float (*restrict r53)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r53_vec;
float (*restrict r82)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r82_vec;
float (*restrict r83)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r83_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
//printf(" Change of inner x0_blk0 %d \n", x0_blk0);
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d \n", tw, x - time + 1);
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 2; z <= z_M + 2; z += 1)
{
//printf(" bf0 Updating x: %d y: %d z: %d \n", x - time + 2, y - time + 2, z + 2);
r82[x - time + 2][y - time + 2][z + 2] = -(8.33333346e-3F*(u[t0][x - time + 6][y - time + 8][z + 8] - u[t0][x - time + 10][y - time + 8][z + 8]) + 6.66666677e-2F*(-u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]))*r51[x - time + 2][y - time + 2][z + 2]*r52[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F*(u[t0][x - time + 8][y - time + 6][z + 8] - u[t0][x - time + 8][y - time + 10][z + 8]) + 6.66666677e-2F*(-u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 9][z + 8]))*r52[x - time + 2][y - time + 2][z + 2]*r53[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F*(u[t0][x - time + 8][y - time + 8][z + 6] - u[t0][x - time + 8][y - time + 8][z + 10]) + 6.66666677e-2F*(-u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9]))*r50[x - time + 2][y - time + 2][z + 2];
r83[x - time + 2][y - time + 2][z + 2] = -(8.33333346e-3F*(v[t0][x - time + 6][y - time + 8][z + 8] - v[t0][x - time + 10][y - time + 8][z + 8]) + 6.66666677e-2F*(-v[t0][x - time + 7][y - time + 8][z + 8] + v[t0][x - time + 9][y - time + 8][z + 8]))*r51[x - time + 2][y - time + 2][z + 2]*r52[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F*(v[t0][x - time + 8][y - time + 6][z + 8] - v[t0][x - time + 8][y - time + 10][z + 8]) + 6.66666677e-2F*(-v[t0][x - time + 8][y - time + 7][z + 8] + v[t0][x - time + 8][y - time + 9][z + 8]))*r52[x - time + 2][y - time + 2][z + 2]*r53[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F*(v[t0][x - time + 8][y - time + 8][z + 6] - v[t0][x - time + 8][y - time + 8][z + 10]) + 6.66666677e-2F*(-v[t0][x - time + 8][y - time + 8][z + 7] + v[t0][x - time + 8][y - time + 8][z + 9]))*r50[x - time + 2][y - time + 2][z + 2];
//printf("bf0 Timestep tw: %d, Updating x: %d y: %d value: %f \n", tw, x - time + 2, y - time + 2, v[t0][x - time + 9][y - time + 8][z + 8]);
}
}
}
}
}
}
}
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, float *restrict r82_vec, float *restrict r83_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float (*restrict r49)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r49_vec;
float (*restrict r50)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r50_vec;
float (*restrict r51)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r51_vec;
float (*restrict r52)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r52_vec;
float (*restrict r53)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r53_vec;
float (*restrict r82)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r82_vec;
float (*restrict r83)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r83_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
//printf("In bf1 \n");
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x1_blk0 = max((x_m + time), xb); x1_blk0 <= min((x_M + time), (xb + xb_size)); x1_blk0 += x1_blk0_size)
{
//printf(" Change of inner x1_blk0 %d \n", x1_blk0);
for (int y1_blk0 = max((y_m + time), yb); y1_blk0 <= min((y_M + time), (yb + yb_size)); y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x1_blk0 + x1_blk0_size - 1)); x++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d \n", tw, x - time + 4);
for (int y = y1_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y1_blk0 + y1_blk0_size - 1)); y++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d \n", tw, x - time + 4, y - time + 4);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4);
float r93 = 1.0/dt;
float r92 = 1.0/(dt*dt);
float r91 = 6.66666677e-2F*(r50[x - time + 2][y - time + 2][z + 1]*r83[x - time + 2][y - time + 2][z + 1] - r50[x - time + 2][y - time + 2][z + 3]*r83[x - time + 2][y - time + 2][z + 3] + r51[x - time + 1][y - time + 2][z + 2]*r52[x - time + 1][y - time + 2][z + 2]*r83[x - time + 1][y - time + 2][z + 2] - r51[x - time + 3][y - time + 2][z + 2]*r52[x - time + 3][y - time + 2][z + 2]*r83[x - time + 3][y - time + 2][z + 2] + r52[x - time + 2][y - time + 1][z + 2]*r53[x - time + 2][y - time + 1][z + 2]*r83[x - time + 2][y - time + 1][z + 2] - r52[x - time + 2][y - time + 3][z + 2]*r53[x - time + 2][y - time + 3][z + 2]*r83[x - time + 2][y - time + 3][z + 2]);
float r90 = 8.33333346e-3F*(-r50[x - time + 2][y - time + 2][z]*r83[x - time + 2][y - time + 2][z] + r50[x - time + 2][y - time + 2][z + 4]*r83[x - time + 2][y - time + 2][z + 4] - r51[x - time] [y - time + 2][z + 2]*r52[x - time] [y - time + 2][z + 2]*r83[x - time] [y - time + 2][z + 2] + r51[x - time + 4][y - time + 2][z + 2]*r52[x - time + 4][y - time + 2][z + 2]*r83[x - time + 4][y - time + 2][z + 2] - r52[x - time + 2][y - time][z + 2]*r53[x - time + 2][y - time][z + 2]*r83[x - time + 2][y - time][z + 2] + r52[x - time + 2][y - time + 4][z + 2]*r53[x - time + 2][y - time + 4][z + 2]*r83[x - time + 2][y - time + 4][z + 2]);
float r89 = pow(vp[x - time + 8][y - time + 8][z + 8], -2);
float r88 = 1.0/(r89*r92 + r93*damp[x - time + 1][y - time + 1][z + 1]);
float r87 = 8.33333346e-3F*(r50[x - time + 2][y - time + 2][z]*r82[x - time + 2][y - time + 2][z] - r50[x - time + 2][y - time + 2][z + 4]*r82[x - time + 2][y - time + 2][z + 4] + r51[x - time] [y - time + 2][z + 2]*r52[x - time] [y - time + 2][z + 2]*r82[x - time] [y - time + 2][z + 2] - r51[x - time + 4][y - time + 2][z + 2]*r52[x - time + 4][y - time + 2][z + 2]*r82[x - time + 4][y - time + 2][z + 2] + r52[x - time + 2][y - time][z + 2]*r53[x - time + 2][y - time][z + 2]*r82[x - time + 2][y - time][z + 2] - r52[x - time + 2][y - time + 4][z + 2]*r53[x - time + 2][y - time + 4][z + 2]*r82[x - time + 2][y - time + 4][z + 2]) + 6.66666677e-2F*(-r50[x - time + 2][y - time + 2][z + 1]*r82[x - time + 2][y - time + 2][z + 1] + r50[x - time + 2][y - time + 2][z + 3]*r82[x - time + 2][y - time + 2][z + 3] - r51[x - time + 1][y - time + 2][z + 2]*r52[x - time + 1][y - time + 2][z + 2]*r82[x - time + 1][y - time + 2][z + 2] + r51[x - time + 3][y - time + 2][z + 2]*r52[x - time + 3][y - time + 2][z + 2]*r82[x - time + 3][y - time + 2][z + 2] - r52[x - time + 2][y - time + 1][z + 2]*r53[x - time + 2][y - time + 1][z + 2]*r82[x - time + 2][y - time + 1][z + 2] + r52[x - time + 2][y - time + 3][z + 2]*r53[x - time + 2][y - time + 3][z + 2]*r82[x - time + 2][y - time + 3][z + 2]) - 1.78571425e-5F*(u[t0][x - time + 4][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 4][z + 8] + u[t0][x - time + 8][y - time + 8][z + 4] + u[t0][x - time + 8][y - time + 8][z + 12] + u[t0][x - time + 8][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 8][z + 8]) + 2.53968248e-4F*(u[t0][x - time + 5][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 5][z + 8] + u[t0][x - time + 8][y - time + 8][z + 5] + u[t0][x - time + 8][y - time + 8][z + 11] + u[t0][x - time + 8][y - time + 11][z + 8] + u[t0][x - time + 11][y - time + 8][z + 8]) - 1.99999996e-3F*(u[t0][x - time + 6][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 6][z + 8] + u[t0][x - time + 8][y - time + 8][z + 6] + u[t0][x - time + 8][y - time + 8][z + 10] + u[t0][x - time + 8][y - time + 10][z + 8] + u[t0][x - time + 10][y - time + 8][z + 8]) + 1.59999996e-2F*(u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9] + u[t0][x - time + 8][y - time + 9][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]) - 8.54166647e-2F*u[t0][x - time + 8][y - time + 8][z + 8];
float r80 = r92*(-2.0F*u[t0][x - time + 8][y - time + 8][z + 8] + u[t1][x - time + 8][y - time + 8][z + 8]);
float r81 = r92*(-2.0F*v[t0][x - time + 8][y - time + 8][z + 8] + v[t1][x - time + 8][y - time + 8][z + 8]);
u[t2][x - time + 8][y - time + 8][z + 8] = r88*((-r80)*r89 + r87*(2*epsilon[x - time + 8][y - time + 8][z + 8] + 1) + r93*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 8][y - time + 8][z + 8]) + (r90 + r91)*r49[x - time + 2][y - time + 2][z + 2]);
v[t2][x - time + 8][y - time + 8][z + 8] = r88*((-r81)*r89 + r87*r49[x - time + 2][y - time + 2][z + 2] + r90 + r91 + r93*(damp[x - time + 1][y - time + 1][z + 1]*v[t0][x - time + 8][y - time + 8][z + 8]));
}
//int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
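// Inject point sources: for each nonzero z-location recorded in
// sp_source_mask for this (x, y) column, scatter the saved source
// amplitudes for timestep tw (save_src_u/save_src_v, indexed via
// source_id) into the freshly updated u and v wavefields, gated by
// source_mask.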
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
//#pragma omp atomic update
u[t2][x - time + 8][y - time + 8][zind + 8] += r0;
float r1 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
//#pragma omp atomic update
v[t2][x - time + 8][y - time + 8][zind + 8] += r1;
//printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 8, y - time + 8, zind + 8, r0, r1);
}
}
}
}
}
}
}
|
GB_unaryop__lnot_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_int16
// op(A') function: GB_tran__lnot_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint8_int16
(
uint8_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
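// Illustrative only: a standalone sketch (disabled) of the scalar operation
// the macros above hard-code -- cast int16_t to uint8_t, then apply the
// logical NOT. The helper name lnot_uint8_int16_ref is hypothetical.
#if 0
static inline uint8_t lnot_uint8_int16_ref (int16_t aij)
{
    uint8_t x = (uint8_t) aij ;         // GB_CASTING: uint8_t z = (uint8_t) x
    return (uint8_t) !(x != 0) ;        // GB_OP: z = !(x != 0)
}
#endif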
|
GB_unop__sinh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fp64_fp64)
// op(A') function: GB (_unop_tran__sinh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sinh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = sinh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sinh_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = sinh (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = sinh (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sinh_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nr_incore.c | /*
* File: nr_incore.c
* Author: Qiming Sun <osirpt.sun@gmail.com>
*
*/
#include <stdlib.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "nr_ao2mo.h"
#define OUTPUTIJ 1
#define INPUT_IJ 2
void AO2MOtranse1_incore_s4(int (*fmmm)(), int row_id,
double *vout, double *eri_ao, double *buf,
struct _AO2MOEnvs *envs)
{
int nao = envs->nao;
size_t npair = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ);
size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
double *peri = eri_ao + npair * (row_id+envs->klsh_start);
NPdunpack_tril(nao, peri, buf, 0);
(*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0);
}
void AO2MOtranse1_incore_s8(int (*fmmm)(), int row_id,
double *vout, double *eri_ao, double *buf,
struct _AO2MOEnvs *envs)
{
int nao = envs->nao;
size_t npair = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ);
size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
double *buf0 = malloc(sizeof(double) * npair);
// Note AO2MOnr_e1incore_drv stores ij_start in envs.klsh_start
NPdunpack_row(npair, row_id+envs->klsh_start, eri_ao, buf0);
NPdunpack_tril(nao, buf0, buf, 0);
(*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0);
free(buf0);
}
// ij_start and ij_count for the ij-AO-pair in eri_ao
void AO2MOnr_e1incore_drv(void (*ftranse2_like)(), int (*fmmm)(),
double *vout, double *eri_ao, double *mo_coeff,
int ij_start, int ij_count, int nao,
int i_start, int i_count, int j_start, int j_count)
{
struct _AO2MOEnvs envs;
envs.bra_start = i_start;
envs.bra_count = i_count;
envs.ket_start = j_start;
envs.ket_count = j_count;
envs.nao = nao;
envs.mo_coeff = mo_coeff;
envs.klsh_start = ij_start;
int i;
#pragma omp parallel default(none) \
shared(ftranse2_like, fmmm, vout, eri_ao, ij_count, envs, \
nao, i_count, j_count) \
private(i)
{
double *buf = malloc(sizeof(double) * (nao+i_count) * (nao+j_count));
#pragma omp for schedule(dynamic)
for (i = 0; i < ij_count; i++) {
(*ftranse2_like)(fmmm, i, vout, eri_ao, buf, &envs);
}
free(buf);
}
}
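/* Illustrative only: the driver above relies on a common OpenMP idiom --
 * each thread allocates its own scratch buffer inside the parallel region,
 * then rows are shared out with "omp for". A disabled standalone sketch of
 * the same pattern (process_row is hypothetical):
 */
#if 0
void per_thread_buffer_demo(int nrow, size_t bufsize)
{
#pragma omp parallel
{
        double *buf = malloc(sizeof(double) * bufsize);
#pragma omp for schedule(dynamic)
        for (int i = 0; i < nrow; i++) {
                process_row(i, buf);    /* hypothetical per-row work */
        }
        free(buf);
}
}
#endif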
|
dmul.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <HiParTI.h>
void print_usage(char ** argv) {
printf("Usage: %s [options] \n\n", argv[0]);
printf("Options: -x X INPUT\n");
printf(" -y Y INPUT\n");
printf(" -o OUTPUT\n");
printf(" -d CUDA_DEV_ID\n");
printf(" --help\n");
printf("\n");
}
int main(int argc, char *argv[]) {
char Xfname[1000], Yfname[1000];
FILE *fo = NULL;
ptiSparseTensor X, Y, Z;
int cuda_dev_id = -2;
int niters = 5;
int nthreads = 1;
if(argc <= 3) { // #Required arguments
print_usage(argv);
exit(1);
}
int c;
for(;;) {
static struct option long_options[] = {
{"x-input", required_argument, 0, 'x'},
{"y-input", required_argument, 0, 'y'},
{"output", optional_argument, 0, 'o'},
{"cuda-dev-id", optional_argument, 0, 'd'},
{"help", no_argument, 0, 0},
{0, 0, 0, 0}
};
int option_index = 0;
c = getopt_long(argc, argv, "x:y:o:d:", long_options, &option_index);
if(c == -1) {
break;
}
switch(c) {
case 'x':
strcpy(Xfname, optarg);
printf("X input file: %s\n", optarg); fflush(stdout);
break;
case 'y':
strcpy(Yfname, optarg);
printf("Y input file: %s\n", optarg); fflush(stdout);
break;
case 'o':
fo = fopen(optarg, "w");
ptiAssert(fo != NULL);
printf("output file: %s\n", optarg); fflush(stdout);
break;
case 'd':
sscanf(optarg, "%d", &cuda_dev_id);
break;
case '?': /* invalid option */
case 'h':
default:
print_usage(argv);
exit(1);
}
}
printf("cuda_dev_id: %d\n", cuda_dev_id);
ptiAssert(ptiLoadSparseTensor(&X, 1, Xfname) == 0);
ptiAssert(ptiLoadSparseTensor(&Y, 1, Yfname) == 0);
/* Warm up the caches; this pass is not timed */
if(cuda_dev_id == -2) {
ptiAssert(ptiSparseTensorDotMulEq(&Z, &X, &Y) == 0);
} else if(cuda_dev_id == -1) {
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
printf("nthreads: %d\n", nthreads);
ptiAssert(ptiOmpSparseTensorDotMulEq(&Z, &X, &Y) == 0);
}
for(int it=0; it<niters; ++it) {
if(cuda_dev_id == -2) {
ptiAssert(ptiSparseTensorDotMulEq(&Z, &X, &Y) == 0);
} else if(cuda_dev_id == -1) {
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
printf("nthreads: %d\n", nthreads);
ptiAssert(ptiOmpSparseTensorDotMulEq(&Z, &X, &Y) == 0);
}
}
if (fo != NULL) {
ptiAssert(ptiDumpSparseTensor(&Z, 1, fo) == 0);
fclose(fo);
}
ptiFreeSparseTensor(&X);
ptiFreeSparseTensor(&Y);
ptiFreeSparseTensor(&Z);
return 0;
}
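/* Illustrative invocation (file names are hypothetical):
 *   ./dmul -x X.tns -y Y.tns -o Z.tns          sequential (default cuda_dev_id -2)
 *   ./dmul -x X.tns -y Y.tns -o Z.tns -d -1    OpenMP path (cuda_dev_id -1)
 */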
|
DRB045-doall1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Simplest one dimension array computation
*/
int a[100];
int main()
{
int i;
#pragma omp parallel for private(i)
for (i=0;i<100;i++)
a[i]=i;
#pragma omp parallel for private(i)
for (i=0;i<100;i++)
a[i]=a[i]+1;
for (i=0;i<100;i++)
printf("%d\n",a[i]);
return 0;
}
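/* Illustrative only: both loops above are race-free because iteration i
   writes only a[i]. For contrast, a loop-carried dependence such as the
   disabled fragment below would race under "parallel for": */
#if 0
#pragma omp parallel for
for (int i = 0; i < 99; i++)
  a[i] = a[i+1] + 1; /* read of a[i+1] races with its write by iteration i+1 */
#endif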
|
struct_scale.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.10 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Structured scale routine
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
* hypre_StructScale
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructScale( double alpha,
hypre_StructVector *y )
{
hypre_Box *y_data_box;
HYPRE_Int yi;
double *yp;
hypre_BoxArray *boxes;
hypre_Box *box;
hypre_Index loop_size;
hypre_IndexRef start;
hypre_Index unit_stride;
HYPRE_Int i;
hypre_SetIndex(unit_stride, 1, 1, 1);
boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
hypre_ForBoxI(i, boxes)
{
box = hypre_BoxArrayBox(boxes, i);
start = hypre_BoxIMin(box);
y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);
yp = hypre_StructVectorBoxData(y, i);
hypre_BoxGetSize(box, loop_size);
hypre_BoxLoop1Begin(hypre_StructVectorDim(y), loop_size,
y_data_box, start, unit_stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,yi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(yi)
{
yp[yi] *= alpha;
}
hypre_BoxLoop1End(yi);
}
return hypre_error_flag;
}
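/* Illustrative only: hypre's BoxLoop macros hide the index arithmetic, but
 * the net effect of the loop above is a flat scaling pass over each box's
 * data, which is why a single "omp parallel for" over the box-loop index is
 * enough. A rough, disabled sketch (not hypre's actual macro expansion):
 */
#if 0
static void scale_box_data( double alpha, double *yp, HYPRE_Int n )
{
   HYPRE_Int yi;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(yi) HYPRE_SMP_SCHEDULE
#endif
   for (yi = 0; yi < n; yi++)
   {
      yp[yi] *= alpha;
   }
}
#endif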
|
balance.c | /*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef EFP_USE_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#include "balance.h"
#include "private.h"
#ifdef EFP_USE_MPI
struct master {
int total, range[2];
};
#define MPI_CHUNK_SIZE 16
static int
master_get_work(struct master *master, int range[2])
{
#ifdef _OPENMP
#pragma omp critical
#endif
{
master->range[0] = master->range[1];
master->range[1] += MPI_CHUNK_SIZE;
if (master->range[1] > master->total)
master->range[1] = master->total;
range[0] = master->range[0];
range[1] = master->range[1];
}
return range[1] > range[0];
}
static void
master_on_master(struct master *master)
{
MPI_Status status;
int size, range[2];
MPI_Comm_size(MPI_COMM_WORLD, &size);
while (master_get_work(master, range)) {
MPI_Recv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, 0,
MPI_COMM_WORLD, &status);
MPI_Send(range, 2, MPI_INT, status.MPI_SOURCE, 0,
MPI_COMM_WORLD);
}
range[0] = range[1] = -1;
for (int i = 1; i < size; i++) {
MPI_Recv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, 0,
MPI_COMM_WORLD, &status);
MPI_Send(range, 2, MPI_INT, status.MPI_SOURCE, 0,
MPI_COMM_WORLD);
}
}
static void
slave_on_master(struct master *master, struct efp *efp, work_fn fn, void *data)
{
int range[2];
while (master_get_work(master, range))
fn(efp, range[0], range[1], data);
}
#ifndef _OPENMP
static int
omp_get_thread_num(void)
{
return 0;
}
#endif /* _OPENMP */
static void
do_master(struct efp *efp, work_fn fn, void *data)
{
struct master master;
master.total = (int)efp->n_frag;
master.range[0] = master.range[1] = 0;
#ifdef _OPENMP
#pragma omp parallel
#endif
{
if (omp_get_thread_num() == 0)
master_on_master(&master);
else
slave_on_master(&master, efp, fn, data);
}
}
static void
do_slave(struct efp *efp, work_fn fn, void *data)
{
int range[2];
for (;;) {
MPI_Send(NULL, 0, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Recv(range, 2, MPI_INT, 0, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
if (range[0] == -1 ||
range[1] == -1)
break;
fn(efp, range[0], range[1], data);
}
}
#endif /* EFP_USE_MPI */
void
efp_allreduce(double *x, size_t n)
{
#ifdef EFP_USE_MPI
MPI_Allreduce(MPI_IN_PLACE, x, (int)n, MPI_DOUBLE,
MPI_SUM, MPI_COMM_WORLD);
#else
(void)x;
(void)n;
#endif
}
void
efp_balance_work(struct efp *efp, work_fn fn, void *data)
{
#ifdef EFP_USE_MPI
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size == 1)
fn(efp, 0, efp->n_frag, data);
else {
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0)
do_master(efp, fn, data);
else
do_slave(efp, fn, data);
MPI_Barrier(MPI_COMM_WORLD);
}
#else
fn(efp, 0, efp->n_frag, data);
#endif
}
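/* Illustrative only: a disabled sketch of how a caller might use
 * efp_balance_work, assuming the work_fn typedef from balance.h takes
 * (efp, from, to, data). fragment_energy is hypothetical; each fragment is
 * written by exactly one rank/thread, so the writes are disjoint and safe.
 */
#if 0
static void
compute_range(struct efp *efp, size_t from, size_t to, void *data)
{
	double *frag_energy = (double *)data;

	for (size_t i = from; i < to; i++)
		frag_energy[i] = fragment_energy(efp, i);	/* hypothetical */
}

static void
compute_all(struct efp *efp, double *frag_energy /* n_frag elems, zeroed */)
{
	efp_balance_work(efp, compute_range, frag_energy);
	efp_allreduce(frag_energy, efp->n_frag);	/* combine across ranks */
}
#endif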
|
ParallelOpenMP.h | #pragma once
#include <ATen/ATen.h>
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
template <class F>
inline void parallel_for(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const F& f) {
TORCH_CHECK(grain_size >= 0);
if (begin >= end) {
return;
}
#ifdef _OPENMP
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel if (!omp_in_parallel() && ((end - begin) >= grain_size))
{
int64_t num_threads = omp_get_num_threads();
int64_t tid = omp_get_thread_num();
int64_t chunk_size = divup((end - begin), num_threads);
int64_t begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
try {
f(begin_tid, std::min(end, chunk_size + begin_tid));
} catch (...) {
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
#else
f(begin, end);
#endif
}
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
const int64_t begin,
const int64_t end,
const int64_t grain_size,
const scalar_t ident,
const F& f,
const SF& sf) {
TORCH_CHECK(grain_size >= 0);
if (begin >= end) {
return ident;
} else if (in_parallel_region() || get_num_threads() == 1) {
return f(begin, end, ident);
} else {
const int64_t num_results = divup((end - begin), grain_size);
std::vector<scalar_t> results(num_results);
scalar_t* results_data = results.data();
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
for (int64_t id = 0; id < num_results; id++) {
int64_t i = begin + id * grain_size;
try {
results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
} catch (...) {
if (!err_flag.test_and_set()) {
eptr = std::current_exception();
}
}
}
if (eptr) {
std::rethrow_exception(eptr);
}
scalar_t result = ident;
for (auto partial_result : results) {
result = sf(result, partial_result);
}
return result;
}
}
} // namespace at
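// Illustrative only: a disabled sketch of calling the primitives above.
// parallel_for splits [begin, end) into per-thread chunks; parallel_reduce
// combines per-chunk partial results with sf. The function name is
// hypothetical.
#if 0
void scale_and_sum(float* x, int64_t n) {
  at::parallel_for(0, n, /*grain_size=*/2048, [&](int64_t b, int64_t e) {
    for (int64_t i = b; i < e; i++) x[i] *= 2.0f;  // disjoint ranges, no races
  });
  float total = at::parallel_reduce(
      0, n, /*grain_size=*/2048, /*ident=*/0.0f,
      [&](int64_t b, int64_t e, float acc) {
        for (int64_t i = b; i < e; i++) acc += x[i];
        return acc;
      },
      [](float a, float b) { return a + b; });  // combine partial sums
  (void)total;
}
#endif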
|
test_nthreads.c | //===-- test_nthreads.cc - Test setting the number of threads ----*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NTHREADS_MAX 128
int main(void) {
int failed = 0;
// Setting the number of threads to one first lets us check that a request
// for the current value is accepted even when a real change is not.
// (A failure when "changing" from 1 to 1 would be wrong, whereas a change
// from 1 to 2 may legitimately be rejected.)
omp_set_num_threads(1);
for (int i = 1; i <= NTHREADS_MAX; ++i) {
#pragma omp parallel num_threads(i)
{
#pragma omp single
{
int nthreads = omp_get_num_threads();
if (nthreads != i) {
printf("Got %d threads, should have been %d\n", nthreads, i);
fflush(stdout);
failed++;
}
}
}
}
printf("***%s***\n", failed ? "FAILED" : "PASSED");
return failed ? EXIT_FAILURE : EXIT_SUCCESS;
}
|
GB_binop__ge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_fp64
// A.*B function (eWiseMult): GB_AemultB__ge_fp64
// A*D function (colscale): GB_AxD__ge_fp64
// D*A function (rowscale): GB_DxB__ge_fp64
// C+=B function (dense accum): GB_Cdense_accumB__ge_fp64
// C+=b function (dense accum): GB_Cdense_accumb__ge_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_fp64
// C=scalar+B GB_bind1st__ge_fp64
// C=scalar+B' GB_bind1st_tran__ge_fp64
// C=A+scalar GB_bind2nd__ge_fp64
// C=A'+scalar GB_bind2nd_tran__ge_fp64
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ge_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__ge_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
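// Illustrative only: outside the GraphBLAS machinery, the bind1st and
// bind2nd kernels above reduce to these disabled scalar loops (the _ref
// names are hypothetical; bind1st makes the scalar the first operand,
// bind2nd the second):
#if 0
static void ge_fp64_bind1st_ref (bool *Cx, double x, const double *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = (x >= Bx [p]) ;
}
static void ge_fp64_bind2nd_ref (bool *Cx, const double *Ax, double y, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = (Ax [p] >= y) ;
}
#endif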
|
TempMemory.h |
#ifndef _TEMPMEMORY_H
#define _TEMPMEMORY_H
#include "GlobalInc.h"
#include <omp.h>
template <class PARAM_TYPE> class TempCMem;
template <class PARAM_TYPE> class TempBuffMem;
extern char *alloc_buffer(size_t Nelem);
extern void free_buffer(char *Ptr);
extern void memory_abort ();
/*#ifdef LARGE_BUFF
#define VMS_DIR "CEA_VM_DIR"
#define VMS_SIZE "CEA_VM_SIZE"
void vms_init(int UserSize, char * UserName, Bool Verbose);
#endif*/
#undef MEM_NOT_MANADGE
#define MEM_NOT_MANADGE 1 //FCS Modified
#undef DEBUG_MEM
#define DEBUG_MEM 0
#define MAX_IMA_IN_MEM 500
//******************************************************************************
// external var
//*****************************************************************************/
extern TempCMem<int> MemInt;
extern TempCMem<float> MemFloat;
extern TempCMem<double> MemDouble;
extern TempCMem<unsigned long> MemULong;//FCS ADDED
extern TempCMem<complex_f> MemCF;
extern TempCMem<complex_d> MemCD;
extern Bool UseVMS;
//*****************************************************************************/
// Template TempCMem class
//*****************************************************************************/
template <class PARAM_TYPE>
class TempCMem {
private:
#if MEM_NOT_MANADGE==0
TempBuffMem<PARAM_TYPE> TabBuffMem[MAX_IMA_IN_MEM];
#endif
public:
TempCMem (){}
~TempCMem (){}
PARAM_TYPE* alloc (int Nelem);
void free (PARAM_TYPE *Ptr_Data);
};
//*****************************************************************************/
// Template TempBuffMem class
//*****************************************************************************/
template <class PARAM_TYPE>
class TempBuffMem {
private:
PARAM_TYPE* Ptr;
int Size;
Bool Use;
public:
TempBuffMem () {Size = 0; Use = False;}
PARAM_TYPE* alloc (int Nelem) ;
void give_back () {Use = False;}
void take () {Use = True;}
int size () {return Size;}
Bool use () {return Use;}
PARAM_TYPE* buffer () { return Ptr;}
~TempBuffMem ();
};
//------------------------------------------------------------------------------
// class TempCMem<>::alloc ()
//------------------------------------------------------------------------------
template <class PARAM_TYPE>
PARAM_TYPE* TempCMem<PARAM_TYPE>::alloc (int Nelem) {
PARAM_TYPE* Pf=NULL;
//FCS MODIFIED TO TAKE THE BUFFER IN THE WHILE LOOP AND BE CRITICAL FOR OPENMP
#if MEM_NOT_MANADGE==1
#ifdef _OPENMP
#pragma omp critical ( tempmem_alloc )
{
#endif
Pf = (PARAM_TYPE *) malloc (Nelem*sizeof(PARAM_TYPE));
if (Pf == NULL) memory_abort ();
#ifdef _OPENMP
#if DEBUG_MEM
int thr_num=omp_get_thread_num();
cout << "Thr: " << thr_num << " Alloc Index Memory Ptr = " << Pf <<" Nelem= "<<Nelem << endl;
#endif
}
#endif
return Pf;
#else
int i=0;
Bool Find=False;
#ifdef _OPENMP
#pragma omp critical ( tempmem_alloc2 )
{
#endif
while ( (TabBuffMem[i].size() != 0)
&& (!Find) && (i < MAX_IMA_IN_MEM)) {
if ((TabBuffMem[i].use()) || (TabBuffMem[i].size() != Nelem)) i++;
else {
Find = True;
TabBuffMem[i].take ();
Pf = TabBuffMem[i].buffer();
break;
}
}
if (i < MAX_IMA_IN_MEM) {
Pf = TabBuffMem[i].alloc (Nelem);
#if DEBUG_MEM
#ifdef _OPENMP
int thr_num=omp_get_thread_num();
cout << "Thr: " << thr_num << " Alloc Index Memory = " << i << " Ptr = " << Pf << " Of size = " << TabBuffMem[i].size() << endl;
#else
cout << "Alloc Create: " << i << " Ptr = " << Pf << endl;
#endif
#endif
} else {
cerr << "Error: CMemInt cannot allocate memory ... " << endl;
system ("pstat -s");
i=0;
while ((TabBuffMem[i].size() != 0) && (i < MAX_IMA_IN_MEM)) {
cout << "Buffer " << i << " Size = " << TabBuffMem[i].size();
if (TabBuffMem[i].use()) cout << " USE " << endl;
else cout << " NOT USE " << endl;
i++;
}
exit (0);
}
#ifdef _OPENMP
}
#endif
return Pf;
#endif
};
//------------------------------------------------------------------------------
// class TempCMem<>::free (PARAM_TYPE *Ptr_Data)
//------------------------------------------------------------------------------
template <class PARAM_TYPE>
void TempCMem<PARAM_TYPE>::free (PARAM_TYPE *Ptr_Data) {
#if MEM_NOT_MANADGE==1
#ifdef _OPENMP
#pragma omp critical ( tempmem_alloc3 )
{
#if DEBUG_MEM
int thr_num=omp_get_thread_num();
cout << "Thr: " << thr_num << " Dealloc Index Memory Ptr = " << Ptr_Data << endl;
#endif
#endif
free_buffer((char *) Ptr_Data);
#ifdef _OPENMP
}
#endif
#else
int i=0;
Bool Find=False;
#ifdef _OPENMP
#pragma omp critical ( tempmem_alloc4 )
{
#endif
while ( (TabBuffMem[i].size() != 0)
&& (!Find) && (i < MAX_IMA_IN_MEM)) {
if (Ptr_Data == TabBuffMem[i].buffer()) Find = True;
else i ++;
}
#if DEBUG_MEM
#ifdef _OPENMP
int thr_num=omp_get_thread_num();
cout << "Thr: " << thr_num << " Dealloc Index Memory = " << i << " Ptr = " << Ptr_Data << " Of size = " << TabBuffMem[i].size() << endl;
#else
cout << "DeAlloc: " << i << " Ptr = " << Ptr_Data << endl;
#endif
#endif
if (Find) TabBuffMem[i].give_back();
else {
cerr << "Error: CMemInt cannot deallocate the memory ... "<< Ptr_Data << endl;
exit (0);
}
#ifdef _OPENMP
}
#endif
#endif
};
//------------------------------------------------------------------------------
// class TempBuffMem<>::alloc (int Nelem)
//------------------------------------------------------------------------------
template <class PARAM_TYPE>
PARAM_TYPE* TempBuffMem<PARAM_TYPE>::alloc (int Nelem) {
PARAM_TYPE *Ptr=NULL;
Ptr = (PARAM_TYPE*) alloc_buffer((size_t) (Nelem * sizeof(PARAM_TYPE)));
Size = Nelem;
Use = True;
return Ptr;
};
//------------------------------------------------------------------------------
// class ~TempBuffMem ()
//------------------------------------------------------------------------------
template <class PARAM_TYPE>
TempBuffMem<PARAM_TYPE>::~TempBuffMem() {
if (Size != 0) free_buffer ((char *) Ptr);
Size = 0;
Use = False;
};
//******************************************************************************
// template free function
//*****************************************************************************/
template <class PARAM_TYPE>
inline PARAM_TYPE* temp_alloc(int Nelem, PARAM_TYPE& Dummy) {
PARAM_TYPE* Ptr=NULL;
Ptr = (PARAM_TYPE*) alloc_buffer((size_t) (Nelem*sizeof(PARAM_TYPE)));
return Ptr;
};
inline float * f_alloc(int Nelem) {float Dummy;return temp_alloc(Nelem,Dummy);};
inline int * i_alloc(int Nelem) {int Dummy;return temp_alloc(Nelem,Dummy);};
inline unsigned int * ui_alloc(int Nelem) {unsigned int Dummy;return temp_alloc(Nelem,Dummy);};
inline short * s_alloc(int Nelem) {short Dummy; return temp_alloc(Nelem,Dummy);};
inline unsigned short * us_alloc(int Nelem) {unsigned short Dummy;return temp_alloc(Nelem,Dummy);};
inline char * c_alloc(int Nelem) {char Dummy; return temp_alloc(Nelem,Dummy);};
inline unsigned char * uc_alloc(int Nelem) {unsigned char Dummy; return temp_alloc(Nelem,Dummy);};
template <class PARAM_TYPE>
inline void temp_free(PARAM_TYPE* Ptr) {
free_buffer((char *) Ptr);
};
inline void f_free(float *ptr) {temp_free(ptr);};
inline void i_free(int *ptr) {temp_free(ptr);};
inline void s_free(short *ptr) {temp_free(ptr);};
inline void c_free(char *ptr) {temp_free(ptr);};
inline void ui_free(unsigned int *ptr) {temp_free(ptr);};
inline void us_free(unsigned short *ptr) {temp_free(ptr);};
inline void uc_free(unsigned char *ptr) {temp_free(ptr);};
//******************************************************************************
// some alloc and free ....
//*****************************************************************************/
template <class PARAM_TYPE>
inline PARAM_TYPE* vector_alloc(int Nelem, PARAM_TYPE& Dummy) {
PARAM_TYPE* Vector=NULL;
Vector = new PARAM_TYPE[Nelem];
if (Vector == NULL) memory_abort();
return Vector;
};
inline double *d_vector_alloc(int Nbr_Elem) {
double Dummy;return vector_alloc(Nbr_Elem,Dummy);};
inline float *f_vector_alloc(int Nbr_Elem) {
float Dummy;return vector_alloc(Nbr_Elem,Dummy);};
inline int *i_vector_alloc(int Nbr_Elem) {
int Dummy;return vector_alloc(Nbr_Elem,Dummy);};
inline complex_f *cf_vector_alloc(int Nbr_Elem) {
complex_f Dummy;return vector_alloc(Nbr_Elem,Dummy);};
template <class PARAM_TYPE>
inline void matrix_free(PARAM_TYPE **matrix, int nbr_lin) {
for (int i=0; i<nbr_lin; i++) delete [] matrix[i];
delete [] matrix;
}
inline void i_matrix_free(int **matrix, int nbr_lin) {matrix_free (matrix, nbr_lin);}
inline void f_matrix_free(float **matrix, int nbr_lin) {matrix_free (matrix, nbr_lin);}
inline void cf_matrix_free(complex_f **matrix, int nbr_lin) {matrix_free (matrix, nbr_lin);}
template <class PARAM_TYPE>
inline PARAM_TYPE** matrix_alloc(int nbr_lin, int nbr_col,PARAM_TYPE Dummy) {
//auto
PARAM_TYPE** matrix=NULL;
//register
int i;
matrix = new PARAM_TYPE* [nbr_lin];
if (matrix == NULL) memory_abort();
for (i=0; i<nbr_lin; i++) {
matrix[i] = new PARAM_TYPE [nbr_col];
if (matrix[i] == NULL) memory_abort();
}
return(matrix);
}
inline int** i_matrix_alloc(int nbr_lin, int nbr_col) {
int Dummy=0; return matrix_alloc(nbr_lin,nbr_col,Dummy);
}
inline float** f_matrix_alloc(int nbr_lin, int nbr_col) {
float Dummy=0; return matrix_alloc(nbr_lin,nbr_col,Dummy);
}
inline complex_f** cf_matrix_alloc(int nbr_lin, int nbr_col) {
complex_f Dummy; return matrix_alloc(nbr_lin,nbr_col,Dummy);
}
/**********************************************************/
/**********************************************************/
/**********************************************************/
inline void MemMg_free (float* po_Buffer) {MemFloat.free (po_Buffer);}
inline float* MemMg_alloc (int Size, float Dummy) {return (MemFloat.alloc (Size));}
inline void MemMg_free (double* po_Buffer) {MemDouble.free (po_Buffer);}
inline double* MemMg_alloc (int Size, double Dummy) {return (MemDouble.alloc (Size));}
inline void MemMg_free (int* po_Buffer) {MemInt.free (po_Buffer);}
inline int* MemMg_alloc (int Size, int Dummy) {return (MemInt.alloc (Size));}
//FCS ADDED
inline void MemMg_free (unsigned long* po_Buffer) {MemULong.free (po_Buffer);}
inline unsigned long* MemMg_alloc (int Size, unsigned long Dummy) {return (MemULong.alloc (Size));}
//END FCS ADDED
inline void MemMg_free (complex_f* po_Buffer) {MemCF.free (po_Buffer);}
inline complex_f* MemMg_alloc (int Size, complex_f Dummy) {return (MemCF.alloc (Size));}
inline void MemMg_free (complex_d* po_Buffer) {MemCD.free (po_Buffer);}
inline complex_d* MemMg_alloc (int Size, complex_d Dummy) {return (MemCD.alloc (Size));}
#endif
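/* Illustrative only: a disabled sketch of using the managed allocators above
 * from an OpenMP region; TempCMem<>::alloc/free serialize with "omp critical",
 * so concurrent calls are safe. The function name is hypothetical. */
#if 0
void demo_temp_alloc (int Nelem)
{
#ifdef _OPENMP
#pragma omp parallel
#endif
    {
        float *Buf = MemFloat.alloc (Nelem);   // per-thread scratch
        for (int i=0; i < Nelem; i++) Buf[i] = 0.;
        MemFloat.free (Buf);
    }
}
#endif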
|
sell_spmv_vsx.c | // CURRENTLY BROKEN
#include <altivec.h>
static void SELL_kernel_VSX (ghost_sparsemat *mat, ghost_densemat * lhs, ghost_densemat * invec, int options)
{
ghost_lidx c,j;
ghost_lidx offs;
vector double tmp;
vector double val;
vector double rhs;
#pragma omp parallel for schedule(runtime) private(j,tmp,val,rhs,offs)
for (c=0; c<SPM_NROWSPAD(mat)>>1; c++)
{ // loop over chunks
tmp = vec_splats(0.);
offs = mat->chunkStart[c];
for (j=0; j<(mat->chunkStart[c+1]-mat->chunkStart[c])>>1; j++)
{ // loop inside chunk
val = vec_xld2(offs*sizeof(ghost_dt),mat->val); // load values
rhs = vec_insert(invec->val[mat->col[offs++]],rhs,0);
rhs = vec_insert(invec->val[mat->col[offs++]],rhs,1);
tmp = vec_madd(val,rhs,tmp);
}
if (options & GHOST_SPMV_AXPY) {
vec_xstd2(vec_add(tmp,vec_xld2(c*mat->chunkHeight*sizeof(ghost_dt),lhs->val)),c*mat->chunkHeight*sizeof(ghost_dt),lhs->val);
} else {
vec_xstd2(tmp,c*mat->chunkHeight*sizeof(ghost_dt),lhs->val);
}
}
}
|
core_zsymm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_symm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha \times A \times B + \beta \times C \f]
* or
* \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* where alpha and beta are scalars, A is a symmetric matrix and B and
* C are m-by-n matrices.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft,
* and is n otherwise. Only the uplo triangular part is referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in] B
* B is an ldb-by-n matrix, where the leading m-by-n part of
* the array B must contain the matrix B.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* C is an ldc-by-n matrix.
* On exit, the array is overwritten by the m-by-n updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_zsymm(plasma_enum_t side, plasma_enum_t uplo,
int m, int n,
plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
const plasma_complex64_t *B, int ldb,
plasma_complex64_t beta, plasma_complex64_t *C, int ldc)
{
cblas_zsymm(CblasColMajor,
(CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
m, n,
CBLAS_SADDR(alpha), A, lda,
B, ldb,
CBLAS_SADDR(beta), C, ldc);
}
/******************************************************************************/
void plasma_core_omp_zsymm(
plasma_enum_t side, plasma_enum_t uplo,
int m, int n,
plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
const plasma_complex64_t *B, int ldb,
plasma_complex64_t beta, plasma_complex64_t *C, int ldc,
plasma_sequence_t *sequence, plasma_request_t *request)
{
int ak;
if (side == PlasmaLeft)
ak = m;
else
ak = n;
#pragma omp task depend(in:A[0:lda*ak]) \
depend(in:B[0:ldb*n]) \
depend(inout:C[0:ldc*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_zsymm(side, uplo,
m, n,
alpha, A, lda,
B, ldb,
beta, C, ldc);
}
}
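/* Illustrative only: a minimal, disabled call of the kernel above, assuming
 * plasma_complex64_t behaves as a double-precision complex type and the
 * Plasma enums come from plasma_types.h. Computes C = 1.0*A*B + 0.0*C for
 * 2x2 column-major matrices. */
#if 0
void demo_zsymm(void)
{
    plasma_complex64_t A[4] = {2.0, 1.0, 1.0, 3.0};   /* symmetric */
    plasma_complex64_t B[4] = {1.0, 0.0, 0.0, 1.0};   /* identity */
    plasma_complex64_t C[4] = {0.0, 0.0, 0.0, 0.0};
    plasma_core_zsymm(PlasmaLeft, PlasmaLower,
                      2, 2,
                      1.0, A, 2,
                           B, 2,
                      0.0, C, 2);                     /* C now equals A */
}
#endif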
|
Interp1PrimFifthOrderCRWENO.c | /*! @file Interp1PrimFifthOrderCRWENO.c
* @brief CRWENO5 Scheme (Component-wise application to vectors).
* @author Debojyoti Ghosh
*/
#include <stdio.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <mathfunctions.h>
#include <interpolation.h>
#include <tridiagLU.h>
#include <mpivars.h>
#include <hypar.h>
#ifdef with_omp
#include <omp.h>
#endif
#undef _MINIMUM_GHOSTS_
/*! \def _MINIMUM_GHOSTS_
* Minimum number of ghost points required for this interpolation
* method.
*/
#define _MINIMUM_GHOSTS_ 3
/*! @brief 5th order CRWENO reconstruction (component-wise) on a uniform grid
Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$
at the interfaces from the cell-centered values of the function using the fifth order CRWENO scheme on a
uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies:
\f{equation}{
{\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta,
\f}
where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order CRWENO numerical approximation
\f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three 3rd order methods:
\f{align}{
&\ \omega_1\ \times\ \left[ \frac{2}{3}\hat{\bf f}_{j-1/2} + \frac{1}{3}\hat{\bf f}_{j+1/2} = \frac{1}{6} \left( f_{j-1} + 5f_j \right) \right]\\
+ &\ \omega_2\ \times\ \left[ \frac{1}{3}\hat{\bf f}_{j-1/2}+\frac{2}{3}\hat{\bf f}_{j+1/2} = \frac{1}{6} \left( 5f_j + f_{j+1} \right) \right] \\
+ &\ \omega_3\ \times\ \left[ \frac{2}{3}\hat{\bf f}_{j+1/2} + \frac{1}{3}\hat{\bf f}_{j+3/2} = \frac{1}{6} \left( f_j + 5f_{j+1} \right) \right] \\
\Rightarrow &\ \left(\frac{2}{3}\omega_1+\frac{1}{3}\omega_2\right)\hat{\bf f}_{j-1/2} + \left[\frac{1}{3}\omega_1+\frac{2}{3}(\omega_2+\omega_3)\right]\hat{\bf f}_{j+1/2} + \frac{1}{3}\omega_3\hat{\bf f}_{j+3/2} = \frac{\omega_1}{6}{\bf f}_{j-1} + \frac{5(\omega_1+\omega_2)+\omega_3}{6}{\bf f}_j + \frac{\omega_2+5\omega_3}{6}{\bf f}_{j+1},
\f}
where \f$\omega_k; k=1,2,3\f$ are the nonlinear WENO weights computed in WENOFifthOrderCalculateWeights() (note that the \f$\omega\f$ are different for each component of the vector \f$\hat{\bf f}\f$). The resulting tridiagonal system is solved using tridiagLU() (see also #TridiagLU, tridiagLU.h).
\b Implementation \b Notes:
+ This method assumes a uniform grid in the spatial dimension corresponding to the interpolation.
+ The method described above corresponds to a left-biased interpolation. The corresponding right-biased
interpolation can be obtained by reflecting the equations about interface j+1/2.
+ The scalar interpolation method is applied to the vector function in a component-wise manner.
+ The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction
and carries out the 1D interpolation along these grid lines.
+ Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure:
@image html chap1_1Ddomain.png
@image latex chap1_1Ddomain.eps width=0.9\textwidth
\b Function \b arguments:
Argument | Type | Explanation
--------- | --------- | ---------------------------------------------
fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers).
fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points.
u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions.
x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth.
upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect.
dir | int | Spatial dimension along which to interpolate (e.g., 0 for 1D; 0 or 1 for 2D; 0, 1, or 2 for 3D)
s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local.
m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation methods that need to solve a global implicit system across MPI ranks.
uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$).
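To make the sizing of \a fI and \a fC concrete, here is a minimal sketch with
illustrative values (the names and numbers below are not part of this function):
\code
int nvars = 3, ndims = 2, ghosts = 3, dir = 0;
int dim[2] = {64, 32}; /* local grid size, without ghosts */
int size_fI = nvars, size_fC = nvars;
for (int d = 0; d < ndims; d++) {
  size_fI *= dim[d] + (d == dir ? 1 : 0); /* interfaces: +1 along dir, no ghosts */
  size_fC *= dim[d] + 2*ghosts;           /* cell centers: with ghosts */
}
/* here size_fI = 3*65*32 and size_fC = 3*70*38 */
\endcode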
\b Reference:
+ Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with Weighted ENO Limiting
for Hyperbolic Conservation Laws, SIAM Journal on Scientific Computing, 34 (3), 2012, A1678–A1706,
http://dx.doi.org/10.1137/110857659
+ Ghosh, D., Constantinescu, E. M., Brown, J., Efficient Implementation of Nonlinear Compact Schemes on Massively Parallel Platforms,
SIAM Journal on Scientific Computing, 37 (3), 2015, C354–C383,
http://dx.doi.org/10.1137/140989261
*/
int Interp1PrimFifthOrderCRWENO(
double *fI, /*!< Array of interpolated function values at the interfaces */
double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
double *x, /*!< Grid coordinates */
int upw, /*!< Upwind direction (left or right biased) */
int dir, /*!< Spatial dimension along which to interpolate */
void *s, /*!< Object of type #HyPar containing solver-related variables */
void *m, /*!< Object of type #MPIVariables containing MPI-related variables */
int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e., if the solution is being reconstructed */
)
{
HyPar *solver = (HyPar*) s;
MPIVariables *mpi = (MPIVariables*) m;
CompactScheme *compact= (CompactScheme*) solver->compact;
WENOParameters *weno = (WENOParameters*) solver->interp;
TridiagLU *lu = (TridiagLU*) solver->lusolver;
int sys,Nsys,d;
_DECLARE_IERR_;
int ghosts = solver->ghosts;
int ndims = solver->ndims;
int nvars = solver->nvars;
int *dim = solver->dim_local;
int *stride= solver->stride_with_ghosts;
/* define some constants */
static const double one_third = 1.0/3.0;
static const double one_sixth = 1.0/6.0;
double *ww1, *ww2, *ww3;
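/* weno->w1/w2/w3 store, for every interface, the three WENO weights for both
   upwinding directions and for both f(u) and u; the offsets below select the
   block for this particular (upw, uflag, dir) combination (layout inferred
   from the indexing; the weights themselves are computed in
   WENOFifthOrderCalculateWeights()) */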
ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
/* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
dimension "dir" */
int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
_ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
_ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;
int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);
/* calculate total number of tridiagonal systems to solve */
_ArrayProduct1D_(bounds_outer,ndims,Nsys); Nsys *= nvars;
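/* e.g., for ndims=3, dir=1: N_outer = dim[0]*dim[2] grid lines and
   Nsys = nvars*dim[0]*dim[2] scalar tridiagonal systems, each of size dim[1] (+1 on the last rank) */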
/* Allocate arrays for tridiagonal system */
double *A = compact->A;
double *B = compact->B;
double *C = compact->C;
double *R = compact->R;
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
for (sys=0; sys < N_outer; sys++) {
_ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexC,ndims);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
int qm1,qm2,qm3,qp1,qp2,p;
_ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
if (upw > 0) {
indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
qm3 = qm1 - 2*stride[dir];
qm2 = qm1 - stride[dir];
qp1 = qm1 + stride[dir];
qp2 = qm1 + 2*stride[dir];
} else {
indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
qm3 = qm1 + 2*stride[dir];
qm2 = qm1 + stride[dir];
qp1 = qm1 - stride[dir];
qp2 = qm1 - 2*stride[dir];
}
/* Defining stencil points */
double *fm3, *fm2, *fm1, *fp1, *fp2;
fm3 = fC+qm3*nvars;
fm2 = fC+qm2*nvars;
fm1 = fC+qm1*nvars;
fp1 = fC+qp1*nvars;
fp2 = fC+qp2*nvars;
/* Candidate stencils and their optimal weights*/
double f1[nvars], f2[nvars], f3[nvars];
if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 ))
|| ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
/* Use WENO5 at the physical boundaries */
_ArrayAXBYCZ_(f1,(2*one_sixth),fm3,(-7*one_sixth) ,fm2,(11*one_sixth) ,fm1,nvars);
_ArrayAXBYCZ_(f2,(-one_sixth) ,fm2,(5*one_sixth) ,fm1,(2*one_sixth) ,fp1,nvars);
_ArrayAXBYCZ_(f3,(2*one_sixth),fm1,(5*one_sixth) ,fp1,(-one_sixth) ,fp2,nvars);
} else {
/* CRWENO5 at the interior points */
_ArrayAXBY_(f1,(one_sixth) ,fm2,(5*one_sixth),fm1,nvars);
_ArrayAXBY_(f2,(5*one_sixth),fm1,(one_sixth) ,fp1,nvars);
_ArrayAXBY_(f3,(one_sixth) ,fm1,(5*one_sixth),fp1,nvars);
}
/* retrieve the WENO weights */
double *w1, *w2, *w3;
w1 = (ww1+p*nvars);
w2 = (ww2+p*nvars);
w3 = (ww3+p*nvars);
if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 ))
|| ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) {
_ArraySetValue_ ((A+Nsys*indexI[dir]+sys*nvars),nvars,0.0)
_ArraySetValue_ ((B+Nsys*indexI[dir]+sys*nvars),nvars,1.0)
_ArraySetValue_ ((C+Nsys*indexI[dir]+sys*nvars),nvars,0.0)
} else {
if (upw > 0) {
_ArrayAXBY_ ((A+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
_ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
_ArrayScaleCopy1D_(w3,(one_third),(C+Nsys*indexI[dir]+sys*nvars),nvars);
} else {
_ArrayAXBY_ ((C+Nsys*indexI[dir]+sys*nvars),(2*one_third) ,w1,(one_third) ,w2,nvars);
_ArrayAXBYCZ_ ((B+Nsys*indexI[dir]+sys*nvars),(one_third) ,w1,(2*one_third),w2,(2*one_third),w3,nvars);
_ArrayScaleCopy1D_(w3,(one_third),(A+Nsys*indexI[dir]+sys*nvars),nvars);
}
}
_ArrayMultiply3Add1D_ ((R+Nsys*indexI[dir]+sys*nvars),w1,f1,w2,f2,w3,f3,nvars);
}
}
#ifdef serial
/* Solve the tridiagonal system */
IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,NULL); CHECKERR(ierr);
#else
/* Solve the tridiagonal system */
/* all processes except the last will solve without the last interface to avoid overlap */
if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR tridiagLU(A,B,C,R,dim[dir] ,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
else { IERR tridiagLU(A,B,C,R,dim[dir]+1,Nsys,lu,&mpi->comm[dir]); CHECKERR(ierr); }
/* Now get the solution to the last interface from the next proc */
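/* (each rank except the first sends its first-interface solution R[0..Nsys-1]
   to the previous rank, which stores it as its last interface) */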
double *sendbuf = compact->sendbuf;
double *recvbuf = compact->recvbuf;
MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL};
if (mpi->ip[dir]) for (d=0; d<Nsys; d++) sendbuf[d] = R[d];
if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]);
if (mpi->ip[dir]) MPI_Isend(sendbuf,Nsys,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
MPI_Waitall(2,&req[0],MPI_STATUSES_IGNORE);
if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys; d++) R[d+Nsys*dim[dir]] = recvbuf[d];
#endif
/* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,index_outer,indexC,indexI)
for (sys=0; sys < N_outer; sys++) {
_ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
_ArrayCopy1D_(index_outer,indexI,ndims);
for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
_ArrayCopy1D_((R+sys*nvars+Nsys*indexI[dir]),(fI+nvars*p),nvars);
}
}
return(0);
}
|
rawMD5flat_fmt_plug.c | /*
* Raw-MD5 "flat intrinsics" experimental format
*
* This software is Copyright (c) 2011-2015 magnum,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
*/
#include "arch.h"
#if USE_EXPERIMENTAL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawMD5f;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawMD5f);
#else
#include <string.h>
#include "md5.h"
#include "common.h"
#include "formats.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#define PLAINTEXT_LENGTH 55
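// 55 = one 64-byte MD5 block minus the 0x80 terminator and the 8-byte length field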
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_LABEL "Raw-MD5-flat"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME " (experimental)"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CIPHERTEXT_LENGTH 32
#define DIGEST_SIZE 16
#define BINARY_SIZE 16 // source()
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define FORMAT_TAG "$dynamic_0$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
static struct fmt_tests tests[] = {
{"5a105e8b9d40e1329780d62ea2265d8a", "test1"},
{FORMAT_TAG "5a105e8b9d40e1329780d62ea2265d8a", "test1"},
{"098f6bcd4621d373cade4e832627b4f6", "test"},
{FORMAT_TAG "378e2c4a07968da2eca692320136433d", "thatsworking"},
{FORMAT_TAG "8ad8757baa8564dc136c1e07507f4a98", "test3"},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
#ifdef DEBUG
#if PLAINTEXT_LENGTH >= 55
{FORMAT_TAG "c9ccf168914a1bcfc3229f1948e67da0","1234567890123456789012345678901234567890123456789012345"},
#if PLAINTEXT_LENGTH >= 80
{FORMAT_TAG "57edf4a22be3c955ac49da2e2107b67a","12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
#endif // 80
#endif // 55
#endif // DEBUG
{NULL}
};
#ifdef SIMD_COEF_32
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
static ARCH_WORD_32 (*saved_key)[64/4];
static int sz;
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4];
#endif
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	sz = self->params.max_keys_per_crypt * 64;
#endif
#ifndef SIMD_COEF_32
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
#else
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*saved_key), MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = p;
while (atoi16[ARCH_INDEX(*q)] != 0x7F) {
if (*q >= 'A' && *q <= 'F') /* support lowercase only */
return 0;
q++;
}
return !*q && q - p == CIPHERTEXT_LENGTH;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return ciphertext;
memcpy(out, FORMAT_TAG, TAG_LENGTH);
memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
return out;
}
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i;
if (!out) out = mem_alloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD);
p = ciphertext + TAG_LENGTH;
for (i = 0; i < DIGEST_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
#ifdef SIMD_COEF_32
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+(((unsigned int)index%NBKEYS)/SIMD_COEF_32)*SIMD_COEF_32*4
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
int len = strlen(key);
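	/* flat single-block layout: each saved_key[index] is one 64-byte MD5 block;
	   standard MD5 padding: the 0x80 terminator goes right after the key bytes
	   and the message length in bits is stored in word 14 (the low word of the
	   64-bit length field) */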
strncpy((char*)saved_key[index], key, sizeof(saved_key[0]));
((unsigned char*)saved_key[index])[len] = 0x80;
saved_key[index][14] = len << 3;
#else
strcpy(saved_key[index], key);
#endif
}
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
int len = saved_key[index][14] >> 3;
((char*)saved_key[index])[len] = 0;
#endif
return (char*)saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifdef SIMD_COEF_32
const int inc = SIMD_COEF_32;
#else
const int inc = 1;
#endif
const int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
for (index = 0; index < loops; index += inc)
#endif
{
#if SIMD_COEF_32
SIMDmd5body(saved_key[index], crypt_key[index/NBKEYS], NULL, SSEi_FLAT_IN);
#else
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
MD5_Final((unsigned char *)crypt_key[index], &ctx);
#endif
}
return count;
}
static int cmp_all(void *binary, int count) {
int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32])
#else
if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] )
#endif
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
int i;
for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++)
if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32+i*SIMD_COEF_32])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static char *source(char *source, void *binary)
{
static char Buf[CIPHERTEXT_LENGTH + TAG_LENGTH + 1];
unsigned char *cpi;
char *cpo;
int i;
strcpy(Buf, FORMAT_TAG);
cpo = &Buf[TAG_LENGTH];
cpi = (unsigned char*)(binary);
for (i = 0; i < BINARY_SIZE; ++i) {
*cpo++ = itoa16[(*cpi)>>4];
*cpo++ = itoa16[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
struct fmt_main fmt_rawMD5f = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT,
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* USE_EXPERIMENTAL */
|
main.c | // this creates an input device transform (idt) lookup table for a given camera
// based on spectral sensitivity functions (ssf). works best with a high-res
// high-quality spectral upsampling table, created via
// `mkspectra 1024 /dev/null XYZ`. (note the lack of `-b`)
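// usage sketch (binary name assumed; inferred from the argument parsing in main() below):
//   vkdt-mkidt [--hq] [--der] ["Camera Model"]
// reads spectra.lut from the current directory and writes <model>.lut, <model>.pfm,
// <model>_points.dat and <model>_curves.dat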
// #define USE_LEVMAR
#include "core/inpaint.h"
#include "core/half.h"
#include "core/clip.h"
#include "core/solve.h"
#include "q2t.h"
#include <strings.h>
#include "../../pipe/modules/i-raw/adobe_coeff.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <alloca.h>
#ifdef USE_LEVMAR
#include "levmar-2.6/levmar.h"
#endif
#define MIN(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
#define MAX(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define CLAMP(a,m,M) (MIN(MAX((a), (m)), (M)))
#define CIE_SAMPLES 95
#define CIE_LAMBDA_MIN 360.0
#define CIE_LAMBDA_MAX 830.0
#define CIE_FINE_SAMPLES 95
typedef struct header_t
{ // header of .lut files
uint32_t magic;
uint16_t version;
uint8_t channels;
uint8_t datatype;
uint32_t wd;
uint32_t ht;
}
header_t;
static int num_coeff = 6;
static uint32_t seed = 1337;
static const double srgb_to_xyz[] = {
0.412453, 0.357580, 0.180423,
0.212671, 0.715160, 0.072169,
0.019334, 0.119193, 0.950227
};
static const double xyz_to_rec2020[] = {
1.7166511880, -0.3556707838, -0.2533662814,
-0.6666843518, 1.6164812366, 0.0157685458,
0.0176398574, -0.0427706133, 0.9421031212
};
static const double rec2020_to_xyz[] = {
0.6369580483, 0.1446169036, 0.1688809752,
0.2627002120, 0.6779980715, 0.0593017165,
0.0000000000, 0.0280726930, 1.0609850577,
};
static inline double
xrand()
{ // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs"
seed ^= seed << 13;
seed ^= seed >> 17;
seed ^= seed << 5;
return seed / 4294967296.0;
}
// bilinear lookup
static inline void
fetch_coeff(
const double *xy, // cie xy chromaticities
const float *spectra, // loaded spectral coeffs, 4-strided
const int wd, // width of texture
const int ht, // height of texture
double *out) // bilinear lookup will end up here
{
out[0] = out[1] = out[2] = 0.0;
if(xy[0] < 0 || xy[1] < 0 || xy[0] > 1.0 || xy[1] > 1.0) return;
double tc[] = {xy[0], xy[1]};
tri2quad(tc+0, tc+1);
double xf = tc[0]*wd, yf = tc[1]*ht;
int x0 = (int)CLAMP(xf, 0, wd-1), y0 = (int)CLAMP(yf, 0, ht-1);
int x1 = (int)CLAMP(x0+1, 0, wd-1), y1 = (int)CLAMP(y0+1, 0, ht-1);
int dx = x1 - x0, dy = y1 - y0;
double u = xf - x0, v = yf - y0;
const float *c = spectra + 4*(y0*wd + x0);
out[0] = out[1] = out[2] = 0.0;
for(int k=0;k<3;k++) out[k] += (1.0-u)*(1.0-v)*c[k];
for(int k=0;k<3;k++) out[k] += ( u)*(1.0-v)*c[k + 4*dx];
for(int k=0;k<3;k++) out[k] += (1.0-u)*( v)*c[k + 4*wd*dy];
for(int k=0;k<3;k++) out[k] += ( u)*( v)*c[k + 4*(wd*dy+dx)];
}
// nearest neighbour lookup
static inline void
fetch_coeffi(
const double *xy,
const float *spectra,
const int wd,
const int ht,
double *out)
{
out[0] = out[1] = out[2] = 0.0;
if(xy[0] < 0 || xy[1] < 0 || xy[0] > 1.0 || xy[1] > 1.0) return;
double tc[] = {xy[0], xy[1]};
tri2quad(tc+0, tc+1);
int xi = (int)CLAMP(tc[0]*wd+0.5, 0, wd-1), yi = (int)CLAMP(tc[1]*ht+0.5, 0, ht-1);
const float *c = spectra + 4*(yi*wd + xi);
out[0] = c[0]; out[1] = c[1]; out[2] = c[2];
}
// sample a cubic b-spline around (0.5, 0.5, 0.5)
static inline void
sample_rgb(double *rgb, double delta, int clamp)
{
for(int k=0;k<3;k++)
{
rgb[k] = 0.5;
for(int i=0;i<3;i++)
rgb[k] += delta*(2.0*xrand()-1.0);
if(clamp) rgb[k] = CLAMP(rgb[k], 0.0, 1.0);
}
}
static inline double
normalise1(double *col)
{
const double b = col[0] + col[1] + col[2];
for(int k=0;k<3;k++) col[k] /= b;
return b;
}
static inline double
poly(const double *c, double lambda, int num)
{
double r = 0.0;
for(int i=0;i<num;i++)
r = r * lambda + c[i];
return r;
}
static inline double
sigmoid(double x)
{
return 0.5 * x / sqrt(1.0 + x * x) + 0.5;
}
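// derivative of sigmoid() above; used when assembling the analytic jacobian (ddp_eval()/lm_jacobian())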
static inline double
ddx_sigmoid(double x)
{
return 0.5 * pow(x*x+1.0, -3.0/2.0);
}
static inline double
eval_ref(
const double (*cfa_spec)[4],
const int channel,
const int cnt,
const double *cf)
{
double out = 0.0;
for(int i=0;i<cnt;i++)
{
double lambda = cfa_spec[i][0];
double s = sigmoid(poly(cf, lambda, 3));
double t = cfa_spec[i][1+channel];
out += s*t;
}
return out * (cfa_spec[cnt-1][0] - cfa_spec[0][0]) / (double) cnt;
}
static inline double
eval(
const double *cp, // spectrum with np coeffs, use normalised lambda (for optimiser)
const double *cf, // spectrum with nf coeffs, use lambda in nanometers
int np,
int nf)
{
double out = 0.0;
for (int i = 0; i < CIE_FINE_SAMPLES; ++i)
{
double lambda0 = (i+.5)/(double)CIE_FINE_SAMPLES;
double lambda1 = lambda0 * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) + CIE_LAMBDA_MIN;
double s = sigmoid(poly(cf, lambda1, nf)); // from map, use real lambda
double t = sigmoid(poly(cp, lambda0, np)); // optimising this, use normalised for smaller values (assumption of hessian approx)
out += s * t;
}
return out * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
}
static inline double
ddp_eval(
const double *cp, // parameters, cfa spectrum
const double *cf, // coefficients of data point
int np, // number of parameter coeffs
int nf, // number of data point coeffs
double *jac) // output: gradient dx/dp{0..np-1} (i.e. np elements)
{
double out = 0.0;
double ddp_poly[20];
double ddp_t[20];
for(int j=0;j<np;j++) jac[j] = 0.0;
for (int i = 0; i < CIE_FINE_SAMPLES; ++i)
{
double lambda0 = (i+.5)/(double)CIE_FINE_SAMPLES;
double lambda1 = lambda0 * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) + CIE_LAMBDA_MIN;
double x = poly(cf, lambda1, nf); // from map, use real lambda
double y = poly(cp, lambda0, np); // optimising this, use normalised for smaller values (assumption of hessian approx)
double s = sigmoid(x);
double t = sigmoid(y);
double ddx_sig = ddx_sigmoid(y);
// ddp t = ddx_sigmoid(poly(cp, lambda0, np)) * { ddp_poly(cp, lambda0, np) }
// where ddp_poly = (lambda0^{num_coeff-1}, lambda0^{num_coeff-2}, .., lambda0, 1)
ddp_poly[0] = 1.0;
ddp_poly[1] = lambda0;
ddp_poly[2] = lambda0 * lambda0;
for(int j=3;j<np;j++) ddp_poly[j] = (j&1? ddp_poly[j/2] * ddp_poly[j/2+1] : ddp_poly[j/2]*ddp_poly[j/2]);
for(int j=0;j<np;j++) ddp_t[j] = ddx_sig * ddp_poly[np-j-1];
// now out = sum(s * t)
// ddp out = sum ddp s*t = sum s * ddp t
out += s * t;
for(int j=0;j<np;j++) jac[j] += s * ddp_t[j];
}
// apply final scale:
for(int j=0;j<np;j++) jac[j] *= (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
// also return regular value, we need it for the chain rule
return out * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
}
static inline double
mat3_det(const double *const restrict a)
{
#define A(y, x) a[(y - 1) * 3 + (x - 1)]
return
A(1, 1) * (A(3, 3) * A(2, 2) - A(3, 2) * A(2, 3)) -
A(2, 1) * (A(3, 3) * A(1, 2) - A(3, 2) * A(1, 3)) +
A(3, 1) * (A(2, 3) * A(1, 2) - A(2, 2) * A(1, 3));
}
static inline double
mat3_inv(const double *const restrict a, double *const restrict inv)
{
const double det = mat3_det(a);
if(!(det != 0.0)) return 0.0;
const double invdet = 1.0 / det;
inv[3*0+0] = invdet * (A(3, 3) * A(2, 2) - A(3, 2) * A(2, 3));
inv[3*0+1] = -invdet * (A(3, 3) * A(1, 2) - A(3, 2) * A(1, 3));
inv[3*0+2] = invdet * (A(2, 3) * A(1, 2) - A(2, 2) * A(1, 3));
inv[3*1+0] = -invdet * (A(3, 3) * A(2, 1) - A(3, 1) * A(2, 3));
inv[3*1+1] = invdet * (A(3, 3) * A(1, 1) - A(3, 1) * A(1, 3));
inv[3*1+2] = -invdet * (A(2, 3) * A(1, 1) - A(2, 1) * A(1, 3));
inv[3*2+0] = invdet * (A(3, 2) * A(2, 1) - A(3, 1) * A(2, 2));
inv[3*2+1] = -invdet * (A(3, 2) * A(1, 1) - A(3, 1) * A(1, 2));
inv[3*2+2] = invdet * (A(2, 2) * A(1, 1) - A(2, 1) * A(1, 2));
return det;
#undef A
}
static inline void
mat3_mulv(
const double *const restrict a,
const double *const restrict v,
double *const restrict res)
{
res[0] = res[1] = res[2] = 0.0f;
for(int j=0;j<3;j++)
for(int i=0;i<3;i++)
res[j] += a[3*j+i] * v[i];
}
// jacobian for levmar:
void lm_jacobian(
double *p, // parameters: num_coeff * 3 for camera cfa spectra
double *jac, // output: derivative dx / dp (n x m entries, n-major, i.e. (dx[0]/dp[0], dx[0]/dp[1], ..)
int m, // number of parameters (=num_coeff*3)
int n, // number of data points
void *data)
{
double *cf = data;
memset(jac, 0, sizeof(jac[0])*m*n);
double *Je = alloca(3*m*sizeof(double)); // dxdp
int num = n/2;
for(int i=0;i<num;i++) // for all rgb data points
{
memset(Je, 0, sizeof(double)*3*m);
// get derivative dx[3*i+{0,1,2}] / dp[3 * num_coeff]
double rgb[3];
rgb[0] = ddp_eval(p+0*num_coeff, cf + 3*i, num_coeff, 3, Je + 0*m);
rgb[1] = ddp_eval(p+1*num_coeff, cf + 3*i, num_coeff, 3, Je + 1*m+1*num_coeff);
rgb[2] = ddp_eval(p+2*num_coeff, cf + 3*i, num_coeff, 3, Je + 2*m+2*num_coeff);
double b = rgb[0]+rgb[1]+rgb[2];
double ib = 1.0/(b*b);
// account for normalise1:
// we got n = rgb/sum(rgb), so
// ddr r/(r+g+b) => ddx x/(x+c) = c / (x+c)^2
// ddr g/(r+g+b) => ddx a/(x+b) = - a / (x+b)^2
// / ddr r, ddg r, ddb r \
// Jn = | ddr g, ddg g, ddb g |
// \ ddr b, ddg b, ddb b /
double Jn[] = {
(rgb[1]+rgb[2])*ib, -rgb[0]*ib, -rgb[0]*ib,
-rgb[1]*ib, (rgb[0]+rgb[2])*ib, -rgb[1]*ib,
-rgb[2]*ib, -rgb[2]*ib, (rgb[0]+rgb[1])*ib,
};
// ddp n(rgb) = ddx n(rgb) * ddp rgb
// Jn `-----' =Je see above
// 3x3 (3*num_coeff)x3
for(int j=0;j<m;j++) // parameter number
for(int k=0;k<2;k++) // output channel: only r and g of the normalised rgb are stored
for(int l=0;l<3;l++)
jac[(2*i + k)*m + j] += Jn[3*k+l] * Je[m*l + j];
}
}
// callback for levmar:
void lm_callback(
double *p, // parameters: num_coeff * 3 for camera cfa spectra
double *x, // output data, write here
int m, // number of parameters
int n, // number of data points, i.e. colour spots
void *data)
{
double *cf = data;
int num = n/2;
for(int i=0;i<num;i++)
{
double rgb[3] = {
eval(p+0*num_coeff, cf + 3*i, num_coeff, 3),
eval(p+1*num_coeff, cf + 3*i, num_coeff, 3),
eval(p+2*num_coeff, cf + 3*i, num_coeff, 3)};
normalise1(rgb);
x[2*i+0] = rgb[0];
x[2*i+1] = rgb[1];
}
}
void lm_jacobian_dif(
double *p, // parameters: num_coeff * 3 for camera cfa spectra
double *jac, // output: derivative dx / dp (n x m entries, n-major, i.e. (dx[0]/dp[0], dx[0]/dp[1], ..)
int m, // number of parameters (=num_coeff*3)
int n, // number of data points
void *data)
{
for(int j=0;j<m;j++)
{
double X1[n];
double X2[n];
double cfa2[m];
const double h = 1e-10;
memcpy(cfa2, p, sizeof(cfa2));
cfa2[j] += h;
lm_callback(cfa2, X1, m, n, data);
memcpy(cfa2, p, sizeof(cfa2));
cfa2[j] -= h;
lm_callback(cfa2, X2, m, n, data);
for(int k=0;k<n;k++)
jac[m*k + j] = (X1[k] - X2[k]) / (2.0*h);
}
}
static inline int
load_reference_cfa_spectra(
const char *model,
double cfa_spec[100][4])
{
char filename[256];
snprintf(filename, sizeof(filename), "%s.txt", model);
int len = strlen(filename), cfa_spec_cnt = 0;
for(int i=0;i<len;i++) if(filename[i]==' ') filename[i] = '_';
FILE *fr = fopen(filename, "rb");
if(fr)
{
while(!feof(fr))
{
if(4 != fscanf(fr, "%lg %lg %lg %lg",
cfa_spec[cfa_spec_cnt] + 0, cfa_spec[cfa_spec_cnt] + 1,
cfa_spec[cfa_spec_cnt] + 2, cfa_spec[cfa_spec_cnt] + 3))
cfa_spec_cnt--; // parse failed: cancel the increment below (skips comments and malformed lines)
fscanf(fr, "%*[^\n]"); fgetc(fr); // skip the rest of the line
cfa_spec_cnt++;
}
fclose(fr);
}
else fprintf(stderr, "[vkdt-mkidt] can't open reference response curves! `%s'\n", filename);
return cfa_spec_cnt;
}
// print xyz and rgb pairs for vector plots:
static inline void
write_sample_points(
const char *basename,
const double *cfa,
const float *spectra,
const header_t *sh,
const double (*cfa_spec)[4],
const int cfa_spec_cnt,
const double *xyz_to_cam,
const double *cam_to_xyz,
float *chroma,
const int cwd,
const int cht)
{
char filename[256] = {0};
snprintf(filename, sizeof(filename), "%s_points.dat", basename);
FILE *f0 = fopen(filename, "wb");
if(!f0) return;
for(int i=0;i<2000;i++)
{ // pick a random srgb colour inside gamut
double rgb[3] = {0.0}, xyz[3] = {0.0}, xyz2[3], xyz3[3], cf[3], cam_rgb[3], cam_rgb_spec[3], cam_rgb_rspec[3];
sample_rgb(rgb, 0.9, 0);
mat3_mulv(srgb_to_xyz, rgb, xyz);
mat3_mulv(xyz_to_cam, xyz, cam_rgb); // convert to camera by matrix
normalise1(xyz); // convert to chromaticity
fetch_coeff(xyz, spectra, sh->wd, sh->ht, cf);
if(cf[0] == 0.0) continue; // discard out of spectral locus
for(int k=0;k<3;k++) // camera rgb by processing spectrum * cfa spectrum
cam_rgb_spec[k] = eval(cfa+num_coeff*k, cf, num_coeff, 3);
mat3_mulv(cam_to_xyz, cam_rgb_spec, xyz2); // spectral camera to xyz via matrix
for(int k=0;k<3;k++) // also compute a reference
cam_rgb_rspec[k] = eval_ref(cfa_spec, k, cfa_spec_cnt, cf);
mat3_mulv(cam_to_xyz, cam_rgb_rspec, xyz3);
double rec2020_from_mat[3] = {0.0};
mat3_mulv(xyz_to_rec2020, xyz2, rec2020_from_mat);
normalise1(cam_rgb);
normalise1(cam_rgb_spec);
normalise1(cam_rgb_rspec);
normalise1(xyz2);
normalise1(xyz3);
// also write exactly the thing we'll do runtime: camera rgb -> matrix and camera rgb -> lut
double tc[2] = {cam_rgb_spec[0], cam_rgb_spec[2]}; // normalised already
double rec2020[4] = {0.0}, xyz_spec[3] = {0.0};
fetch_coeff(tc, chroma, cwd, cht, rec2020); // fetch rb
rec2020[2] = rec2020[1]; // convert to rgb
rec2020[1] = 1.0-rec2020[0]-rec2020[2];
mat3_mulv(rec2020_to_xyz, rec2020, xyz_spec);
normalise1(rec2020_from_mat);
normalise1(rec2020);
normalise1(xyz_spec);
fprintf(f0, "%g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g\n",
xyz[0], xyz[1], xyz2[0], xyz2[1], xyz3[0], xyz3[1],
cam_rgb[0], cam_rgb[1], cam_rgb[2],
cam_rgb_spec[0], cam_rgb_spec[1], cam_rgb_spec[2],
cam_rgb_rspec[0], cam_rgb_rspec[1], cam_rgb_rspec[2],
xyz_spec[0], xyz_spec[1], xyz2[0], xyz2[1]);
}
fclose(f0);
}
#if 0
static inline void
write_identity_lut(
int wd,
int ht)
{
header_t hout = {
.magic = 1234,
.version = 2,
.channels = 2,
.datatype = 0,
.wd = wd,
.ht = ht,
};
FILE *f = fopen("id.lut", "wb");
fwrite(&hout, sizeof(hout), 1, f);
uint16_t *b16 = calloc(sizeof(uint16_t), wd*ht*2);
for(int j=0;j<ht;j++)
for(int i=0;i<wd;i++)
{
const int k=j*wd+i;
double rb[2] = {(i+0.5)/wd, (j+0.5)/ht};
quad2tri(rb, rb+1);
b16[2*k+0] = float_to_half(rb[0]);
b16[2*k+1] = float_to_half(rb[1]);
}
fwrite(b16, sizeof(uint16_t), wd*ht*2, f);
fclose(f);
free(b16);
}
#endif
// create 2.5D chroma lut
static inline float*
create_chroma_lut(
int *wd_out,
int *ht_out,
const float *spectra,
const header_t *sh,
const double *cfa,
const double (*cfa_spec)[4],
const int cfa_spec_cnt,
const double *xyz_to_cam,
const double *cam_to_xyz)
{
const int ssf = 0; // DEBUG output lut based on measured curve
// to avoid interpolation artifacts we only want to place straight pixel
// center values of our spectra.lut in the output:
int swd = sh->wd, sht = sh->ht; // sampling dimensions
int wd = swd, ht = sht; // output dimensions
float *buf = calloc(sizeof(float)*4, wd*ht+1);
// do two passes over the data
// get illum E white point (lowest saturation) in camera rgb and quad param:
const double white_xyz[3] = {1.0f/3.0f, 1.0f/3.0f, 1.0f/3.0f};
double white_cam_rgb[3];
mat3_mulv(xyz_to_cam, white_xyz, white_cam_rgb);
normalise1(white_cam_rgb); // should not be needed
tri2quad(white_cam_rgb, white_cam_rgb+2);
// first pass: get rough idea about max deviation from white and the saturation we got there
double *angular_ds = calloc(sizeof(double), 360*2);
int sample_wd = swd, sample_ht = sht;
for(int j=0;j<sample_ht;j++) for(int i=0;i<sample_wd;i++)
{
double xy[2] = {(i+0.5)/sample_wd, (j+0.5)/sample_ht};
quad2tri(xy+0, xy+1);
double cf[3]; // look up the coeffs for the sampled colour spectrum
fetch_coeffi(xy, spectra, sh->wd, sh->ht, cf); // nearest
if(cf[0] == 0) continue; // discard out of spectral locus
double cam_rgb_spec[3] = {0.0}; // camera rgb by processing spectrum * cfa spectrum
for(int k=0;k<3;k++)
if(ssf) cam_rgb_spec[k] = eval_ref(cfa_spec, k, cfa_spec_cnt, cf);
else cam_rgb_spec[k] = eval(cfa+num_coeff*k, cf, num_coeff, 3);
normalise1(cam_rgb_spec);
double u0 = cam_rgb_spec[0], u1 = cam_rgb_spec[2];
tri2quad(&u0, &u1);
float fxy[] = {xy[0], xy[1]}, white[] = {1.0f/3.0f, 1.0f/3.0f};
float sat = dt_spectrum_saturation(fxy, white);
// find angular max dist + sat
int bin = CLAMP(180.0/M_PI * (M_PI + atan2(u1-white_cam_rgb[2], u0-white_cam_rgb[0])), 0, 359);
double dist2 =
(u1-white_cam_rgb[2])*(u1-white_cam_rgb[2])+
(u0-white_cam_rgb[0])*(u0-white_cam_rgb[0]);
if(dist2 > angular_ds[2*bin])
{
angular_ds[2*bin+0] = dist2;
angular_ds[2*bin+1] = sat;
}
}
double white_norm = 1.0;
{
double coeff[3] = {0.0};
coeff[2] = 100000.0;
double white_cam_rgb[3] = {0.0};
white_cam_rgb[0] = eval(cfa+num_coeff*0, coeff, num_coeff, 3);
white_cam_rgb[1] = eval(cfa+num_coeff*1, coeff, num_coeff, 3);
white_cam_rgb[2] = eval(cfa+num_coeff*2, coeff, num_coeff, 3);
white_norm = normalise1(white_cam_rgb);
}
// 2nd pass:
// #pragma omp parallel for schedule(dynamic) collapse(2) default(shared)
for(int j=0;j<sht;j++) for(int i=0;i<swd;i++)
{
double xy[2] = {(i+0.5)/swd, (j+0.5)/sht};
quad2tri(xy+0, xy+1);
const double xyz[3] = {xy[0], xy[1], 1.0-xy[0]-xy[1]};
double cf[3]; // look up the coeffs for the sampled colour spectrum
fetch_coeff(xy, spectra, sh->wd, sh->ht, cf); // interpolate
// fetch_coeffi(xy, spectra, sh->wd, sh->ht, cf); // nearest
if(cf[0] == 0) continue; // discard out of spectral locus
double cam_rgb_spec[3] = {0.0}; // camera rgb by processing spectrum * cfa spectrum
for(int k=0;k<3;k++)
if(ssf) cam_rgb_spec[k] = eval_ref(cfa_spec, k, cfa_spec_cnt, cf);
else cam_rgb_spec[k] = eval(cfa+num_coeff*k, cf, num_coeff, 3);
double norm = normalise1(cam_rgb_spec);
float fxy[] = {xy[0], xy[1]}, white[2] = {1.0f/3.0f, 1.0f/3.0f};
float sat = dt_spectrum_saturation(fxy, white);
// convert tri t to quad u:
double u0 = cam_rgb_spec[0], u1 = cam_rgb_spec[2];
tri2quad(&u0, &u1);
int bin = CLAMP(180.0/M_PI * (M_PI + atan2(u1-white_cam_rgb[2], u0-white_cam_rgb[0])), 0, 359);
double dist2 =
(u1-white_cam_rgb[2])*(u1-white_cam_rgb[2])+
(u0-white_cam_rgb[0])*(u0-white_cam_rgb[0]);
if(dist2 < angular_ds[2*bin] && sat > angular_ds[2*bin+1])
continue; // discard higher xy sat for lower rgb sat
if(dist2 < 0.8*0.8*angular_ds[2*bin] && sat > 0.95*angular_ds[2*bin+1])
continue; // be harsh to values straddling our bounds
// sort this into rb/sum(rgb) map in camera rgb
int ii = CLAMP(u0 * wd + 0.5, 0, wd-1);
int jj = CLAMP(u1 * ht + 0.5, 0, ht-1);
double rec2020[3];
mat3_mulv(xyz_to_rec2020, xyz, rec2020);
normalise1(rec2020);
buf[4*(jj*wd + ii)+0] = rec2020[0];
buf[4*(jj*wd + ii)+1] = rec2020[2];
buf[4*(jj*wd + ii)+2] = norm/white_norm; // store relative norm too!
}
free(angular_ds);
*wd_out = wd;
*ht_out = ht;
return buf;
}
// write look up table based on hue and chroma:
static inline void
write_chroma_lut(
const char *basename,
const float *buf,
const int wd,
const int ht)
{
char filename[256] = {0};
snprintf(filename, sizeof(filename), "%s.pfm", basename);
FILE *f = fopen(filename, "wb");
fprintf(f, "PF\n%d %d\n-1.0\n", wd, ht);
for(int k=0;k<wd*ht;k++)
{
float col[3] = {buf[4*k], buf[4*k+1], 1.0-buf[4*k]-buf[4*k+1]};
fwrite(col, sizeof(float), 3, f);
}
fclose(f);
header_t hout = {
.magic = 1234,
.version = 2,
.channels = 2,
.datatype = 0,
.wd = wd,
.ht = ht,
};
snprintf(filename, sizeof(filename), "%s.lut", basename);
f = fopen(filename, "wb");
fwrite(&hout, sizeof(hout), 1, f);
uint16_t *b16 = calloc(sizeof(uint16_t), wd*ht*2);
for(int k=0;k<wd*ht;k++)
{
b16[2*k+0] = float_to_half(buf[4*k+0]);
b16[2*k+1] = float_to_half(buf[4*k+1]);
}
fwrite(b16, sizeof(uint16_t), wd*ht*2, f);
fclose(f);
free(b16);
}
static inline void
print_cfa_coeffs(double *cfa)
{
fprintf(stderr, "[vkdt-mkidt] red cfa coeffs ");
for(int k=0;k<num_coeff;k++) fprintf(stderr, "%2.8g ", cfa[k]);
fprintf(stderr, "\n[vkdt-mkidt] green cfa coeffs ");
for(int k=0;k<num_coeff;k++) fprintf(stderr, "%2.8g ", cfa[num_coeff+k]);
fprintf(stderr, "\n[vkdt-mkidt] blue cfa coeffs ");
for(int k=0;k<num_coeff;k++) fprintf(stderr, "%2.8g ", cfa[2*num_coeff+k]);
fprintf(stderr, "\n");
}
static inline void
write_camera_curves(
const char *basename,
const double *cfa)
{ // plot 'dat' u 1:2 w l lw 4, '' u 1:3 w l lw 4, '' u 1:4 w l lw 4
char filename[256] = {0};
snprintf(filename, sizeof(filename), "%s_curves.dat", basename);
FILE *f = fopen(filename, "wb");
if(!f) return;
for(int i=0;i<CIE_SAMPLES;i++)
{ // plot the camera curves:
double lambda0 = (i+.5)/(double)CIE_FINE_SAMPLES;
double lambda1 = lambda0 * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) + CIE_LAMBDA_MIN;
double s0 = sigmoid(poly(cfa+0*num_coeff, lambda0, num_coeff));
double s1 = sigmoid(poly(cfa+1*num_coeff, lambda0, num_coeff));
double s2 = sigmoid(poly(cfa+2*num_coeff, lambda0, num_coeff));
fprintf(f, "%g %g %g %g\n", lambda1, s0, s1, s2);
}
fclose(f);
}
int main(int argc, char *argv[])
{
// warm up random number generator
for(int k=0;k<10;k++) xrand();
// load spectra.lut:
header_t header;
float *spectra = 0;
{
FILE *f = fopen("spectra.lut", "rb");
if(!f) goto error;
if(fread(&header, sizeof(header_t), 1, f) != 1) goto error;
if(header.channels != 4) goto error;
if(header.version != 2) goto error;
spectra = calloc(4*sizeof(float), header.wd * header.ht);
fread(spectra, 4*sizeof(float), header.wd*header.ht, f);
fclose(f);
if(0)
{
error:
if(f) fclose(f);
fprintf(stderr, "[vkdt-mkidt] could not read spectra.lut!\n");
exit(2);
}
}
// these influence the problem size and the mode of operation of the fitter:
// low quality mode:
num_coeff = 6; // number of coefficients in the sigmoid/polynomial cfa spectra
int num_it = 500; // iterations per batch
int num = 10; // number of data points (spectrum/xy) per batch
int batch_cnt = 50; // number of batches
// initial thoughts: using 100 batches and 1000 iterations (num=50)
// improves the fit a bit, just not in the reds. probably not
// converged yet, but more data does seem to help the process.
int der = 0, hq = 0;
const char *model = "Canon EOS 5D Mark II";
for(int k=1;k<argc;k++)
{
if (!strcmp(argv[k], "--hq" )) hq = 1; // high quality mode
else if(!strcmp(argv[k], "--der")) der = 1; // use analytic jacobian
else model = argv[k];
}
if(hq && !der)
{
fprintf(stderr, "[vkdt-mkidt] setting high quality mode, this can be slow..\n");
num_coeff = 6;
num_it = 1000; // 100k does improve over 10k, not by much but notably. unfortunately that is 10x the cost :(
batch_cnt = 100;
num = 70;
}
else if(hq && der)
{ // these need way more iterations because they often bail out early
fprintf(stderr, "[vkdt-mkidt] setting high quality mode and analytic jacobian. this can be slow..\n");
num_coeff = 6;
num_it = 1000;
batch_cnt = 100;
num = 70;
}
double xyz_to_cam[9];
float adobe_mat[12] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
fprintf(stderr, "[vkdt-mkidt] using matrix for `%s'\n", model);
if(strcmp(model, "identity") && dt_dcraw_adobe_coeff(model, &adobe_mat))
{
fprintf(stderr, "[vkdt-mkidt] could not find this camera model (%s)! check your spelling?\n", model);
exit(3);
}
for(int i=0;i<9;i++) xyz_to_cam[i] = adobe_mat[i];
double cam_to_xyz[9] = {0.0};
mat3_inv(xyz_to_cam, cam_to_xyz);
// xyz -> camera rgb matrix. does it contain white balancing stuff?
fprintf(stderr, "[vkdt-mkidt] M = %g %g %g\n"
" %g %g %g\n"
" %g %g %g\n",
xyz_to_cam[0], xyz_to_cam[1], xyz_to_cam[2],
xyz_to_cam[3], xyz_to_cam[4], xyz_to_cam[5],
xyz_to_cam[6], xyz_to_cam[7], xyz_to_cam[8]);
// in particular, xyz white 1 1 1 maps to this camera rgb:
double white[3] = {0.0}, one[3] = {1.0, 1.0, 1.0};
mat3_mulv(xyz_to_cam, one, white);
double wbcoeff[3] = {1.0/white[0], 1.0/white[1], 1.0/white[2]};
fprintf(stderr, "[vkdt-mkidt] white = %g %g %g\n", white[0], white[1], white[2]);
fprintf(stderr, "[vkdt-mkidt] wb coeff = %g %g %g\n", wbcoeff[0], wbcoeff[1], wbcoeff[2]);
// init initial cfa params and lower/upper bounds:
double cfa[30] = {0.0};
double lb[30] = {0.0};
double ub[30] = {0.0};
// these bounds are important for regularisation. else we'll fit to box spectra to be
// able to match the matrix better. +-50 seem to work really well in most cases.
// fuji/nikon sometimes asks for extended range, but i think it would be better to increase
// the number of coefficients/degree of the polynomial instead.
for(int k=0;k<3*num_coeff;k++) lb[k] = -50;
for(int k=0;k<3*num_coeff;k++) ub[k] = 50;
// for(int k=0;k<3*num_coeff;k++) lb[k] = -(ub[k] = 500); // for kodak need more complex blue
int e = num_coeff-3; // quadratic term
cfa[e+0] = cfa[e+num_coeff] = cfa[e+2*num_coeff] = -3.0;
// construct data point arrays for levmar, 3x for each tristimulus/camera rgb channel:
double *data = calloc(num, sizeof(double) * 3); // data point: spectral coeff for input
double *target = calloc(num, sizeof(double) * 2); // target chroma
fprintf(stderr, "[vkdt-mkidt] starting optimiser..");
double resid = 1.0;
// run optimisation in mini-batches
for (int batch=0;batch<batch_cnt;batch++)
{
for(int i=0;i<num;i++)
{
// pick a random srgb colour inside gamut
double rgb[3] = {0.0}, xyz[3] = {0.0};
sample_rgb(rgb, 0.17, 1); // chosen to reach all of srgb at least potentially
// sample_rgb(rgb, 0.3, 1); // relaxed for kodak
mat3_mulv(srgb_to_xyz, rgb, xyz);
normalise1(xyz);
double cf[3]; // look up the coeffs for the sampled colour spectrum
fetch_coeff(xyz, spectra, header.wd, header.ht, cf);
// apply xyz to camera rgb matrix:
double cam_rgb[3] = {0.0};
mat3_mulv(xyz_to_cam, xyz, cam_rgb);
normalise1(cam_rgb);
memcpy(target+2*i, cam_rgb, sizeof(double)*2);
memcpy(data+3*i, cf, sizeof(double)*3);
}
#ifdef USE_LEVMAR
double info[LM_INFO_SZ] = {0};
double opts[LM_OPTS_SZ] = {
// init-mu eps Jte eps Dp eps err eps diff
0.2, 1E-15, 1E-40, 1E-15, 1e-5};//LM_DIFF_DELTA};
if(der)
{
dlevmar_bc_der(
lm_callback, lm_jacobian, cfa, target, 3*num_coeff, 2*num,
lb, ub, NULL, // dscl, // diagonal scaling constants (?)
num_it, opts, info, NULL, NULL, data);
}
else
{
dlevmar_bc_dif(
lm_callback, cfa, target, 3*num_coeff, 2*num,
lb, ub, NULL, // dscl, // diagonal scaling constants (?)
num_it, opts, info, NULL, NULL, data);
}
// fprintf(stderr, " ||e||_2, ||J^T e||_inf, ||Dp||_2, mu/max[J^T J]_ii\n");
// fprintf(stderr, "info %g %g %g %g\n", info[1], info[2], info[3], info[4]);
fprintf(stderr, "\r[vkdt-mkidt] batch %d/%d it %04d/%d reason %g resid %g -> %g ",
batch+1, batch_cnt, (int)info[5], num_it, info[6], info[0], info[1]);
fprintf(stdout, "%g %g\n", info[0], info[1]);
#else
fprintf(stdout, "%g ", resid);
resid = dt_gauss_newton_cg(
lm_callback,
// lm_jacobian, // does not like our analytic jacobian
lm_jacobian_dif,
cfa, target, 3*num_coeff, 2*num,
lb, ub, num_it, data);
fprintf(stderr, "\r[vkdt-mkidt] batch %d/%d resid %g ",
batch+1, batch_cnt, resid);
// fprintf(stdout, "%g\n", resid); // write convergence history
#endif
} // end mini batches
fprintf(stderr, "\n");
// now we're done, prepare the data for some useful output:
// load reference spectra from txt, if we can
double cfa_spec[100][4] = {{0.0}};
const int cfa_spec_cnt = load_reference_cfa_spectra(model, cfa_spec);
// create the actual 2D chroma lut
int wd, ht;
float *buf = create_chroma_lut(&wd, &ht, spectra, &header, cfa, cfa_spec, cfa_spec_cnt, xyz_to_cam, cam_to_xyz);
#if 1 // then hole fill it
dt_inpaint_buf_t inpaint_buf = {
.dat = buf,
.wd = wd,
.ht = ht,
.cpp = 4,
};
dt_inpaint(&inpaint_buf);
#endif
char basename[256] = {0}; // get sanitised basename
snprintf(basename, sizeof(basename), "%s", model);
int len = strnlen(basename, sizeof(basename));
if(!strcasecmp(".txt", basename + len - 4))
basename[len-4] = 0;
for(int i=0;i<len;i++) if(basename[i] == ' ') basename[i] = '_';
// write the chroma lut to half float .lut as well as .pfm for debugging:
write_chroma_lut(basename, buf, wd, ht);
// write a couple of sample points for debug vector plots
write_sample_points(basename, cfa, spectra, &header, cfa_spec, cfa_spec_cnt, xyz_to_cam, cam_to_xyz, buf, wd, ht);
// output the coefficients to console
print_cfa_coeffs(cfa);
// write the cfa curves to a file for plotting
write_camera_curves(basename, cfa);
// write a camera rgb == rec2020 identity lut for debugging
// write_identity_lut(wd, ht);
free(buf);
exit(0);
}
|
sstruct_matrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_SStructPMatrix class.
*
*****************************************************************************/
#include "_hypre_sstruct_mv.h"
/*==========================================================================
* SStructPMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixRef( hypre_SStructPMatrix *matrix,
hypre_SStructPMatrix **matrix_ref )
{
hypre_SStructPMatrixRefCount(matrix) ++;
*matrix_ref = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixCreate( MPI_Comm comm,
hypre_SStructPGrid *pgrid,
hypre_SStructStencil **stencils,
hypre_SStructPMatrix **pmatrix_ptr )
{
hypre_SStructPMatrix *pmatrix;
HYPRE_Int nvars;
HYPRE_Int **smaps;
hypre_StructStencil ***sstencils;
hypre_StructMatrix ***smatrices;
HYPRE_Int **symmetric;
hypre_StructStencil *sstencil;
HYPRE_Int *vars;
hypre_Index *sstencil_shape;
HYPRE_Int sstencil_size;
HYPRE_Int new_dim;
HYPRE_Int *new_sizes;
hypre_Index **new_shapes;
HYPRE_Int size;
hypre_StructGrid *sgrid;
HYPRE_Int vi, vj;
HYPRE_Int i, j, k;
pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1);
hypre_SStructPMatrixComm(pmatrix) = comm;
hypre_SStructPMatrixPGrid(pmatrix) = pgrid;
hypre_SStructPMatrixStencils(pmatrix) = stencils;
nvars = hypre_SStructPGridNVars(pgrid);
hypre_SStructPMatrixNVars(pmatrix) = nvars;
/* create sstencils */
smaps = hypre_TAlloc(HYPRE_Int *, nvars);
sstencils = hypre_TAlloc(hypre_StructStencil **, nvars);
new_sizes = hypre_TAlloc(HYPRE_Int, nvars);
new_shapes = hypre_TAlloc(hypre_Index *, nvars);
size = 0;
for (vi = 0; vi < nvars; vi++)
{
sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars);
for (vj = 0; vj < nvars; vj++)
{
sstencils[vi][vj] = NULL;
new_sizes[vj] = 0;
}
sstencil = hypre_SStructStencilSStencil(stencils[vi]);
vars = hypre_SStructStencilVars(stencils[vi]);
sstencil_shape = hypre_StructStencilShape(sstencil);
sstencil_size = hypre_StructStencilSize(sstencil);
smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size);
for (i = 0; i < sstencil_size; i++)
{
j = vars[i];
new_sizes[j]++;
}
for (vj = 0; vj < nvars; vj++)
{
if (new_sizes[vj])
{
new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj]);
new_sizes[vj] = 0;
}
}
for (i = 0; i < sstencil_size; i++)
{
j = vars[i];
k = new_sizes[j];
hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]);
smaps[vi][i] = k;
new_sizes[j]++;
}
new_dim = hypre_StructStencilNDim(sstencil);
for (vj = 0; vj < nvars; vj++)
{
if (new_sizes[vj])
{
sstencils[vi][vj] =
hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]);
}
size = hypre_max(size, new_sizes[vj]);
}
}
hypre_SStructPMatrixSMaps(pmatrix) = smaps;
hypre_SStructPMatrixSStencils(pmatrix) = sstencils;
hypre_TFree(new_sizes);
hypre_TFree(new_shapes);
/* create smatrices */
smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars);
for (vi = 0; vi < nvars; vi++)
{
smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars);
for (vj = 0; vj < nvars; vj++)
{
smatrices[vi][vj] = NULL;
if (sstencils[vi][vj] != NULL)
{
sgrid = hypre_SStructPGridSGrid(pgrid, vi);
smatrices[vi][vj] =
hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]);
}
}
}
hypre_SStructPMatrixSMatrices(pmatrix) = smatrices;
/* create symmetric */
symmetric = hypre_TAlloc(HYPRE_Int *, nvars);
for (vi = 0; vi < nvars; vi++)
{
symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars);
for (vj = 0; vj < nvars; vj++)
{
symmetric[vi][vj] = 0;
}
}
hypre_SStructPMatrixSymmetric(pmatrix) = symmetric;
hypre_SStructPMatrixSEntriesSize(pmatrix) = size;
hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size);
hypre_SStructPMatrixRefCount(pmatrix) = 1;
*pmatrix_ptr = pmatrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix )
{
hypre_SStructStencil **stencils;
HYPRE_Int nvars;
HYPRE_Int **smaps;
hypre_StructStencil ***sstencils;
hypre_StructMatrix ***smatrices;
HYPRE_Int **symmetric;
HYPRE_Int vi, vj;
if (pmatrix)
{
hypre_SStructPMatrixRefCount(pmatrix) --;
if (hypre_SStructPMatrixRefCount(pmatrix) == 0)
{
stencils = hypre_SStructPMatrixStencils(pmatrix);
nvars = hypre_SStructPMatrixNVars(pmatrix);
smaps = hypre_SStructPMatrixSMaps(pmatrix);
sstencils = hypre_SStructPMatrixSStencils(pmatrix);
smatrices = hypre_SStructPMatrixSMatrices(pmatrix);
symmetric = hypre_SStructPMatrixSymmetric(pmatrix);
for (vi = 0; vi < nvars; vi++)
{
HYPRE_SStructStencilDestroy(stencils[vi]);
hypre_TFree(smaps[vi]);
for (vj = 0; vj < nvars; vj++)
{
hypre_StructStencilDestroy(sstencils[vi][vj]);
hypre_StructMatrixDestroy(smatrices[vi][vj]);
}
hypre_TFree(sstencils[vi]);
hypre_TFree(smatrices[vi]);
hypre_TFree(symmetric[vi]);
}
hypre_TFree(stencils);
hypre_TFree(smaps);
hypre_TFree(sstencils);
hypre_TFree(smatrices);
hypre_TFree(symmetric);
hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix));
hypre_TFree(pmatrix);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix )
{
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
HYPRE_Int **symmetric = hypre_SStructPMatrixSymmetric(pmatrix);
hypre_StructMatrix *smatrix;
HYPRE_Int vi, vj;
/* HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; */
/* HYPRE_Int vi, vj, d, ndim; */
#if 0
ndim = hypre_SStructPMatrixNDim(pmatrix);
/* RDF: Why are the ghosts being reset to one? Maybe it needs to be at least
* one to set shared coefficients correctly, but not exactly one? */
for (d = 0; d < ndim; d++)
{
num_ghost[2*d] = num_ghost[2*d+1] = 1;
}
#endif
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]);
/* hypre_StructMatrixSetNumGhost(smatrix, num_ghost); */
hypre_StructMatrixInitialize(smatrix);
/* needed to get AddTo accumulation correct between processors */
hypre_StructMatrixClearGhostValues(smatrix);
}
}
}
hypre_SStructPMatrixAccumulated(pmatrix) = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix,
hypre_Index index,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_StructMatrix *smatrix;
hypre_BoxArray *grid_boxes;
hypre_Box *box, *grow_box;
HYPRE_Int *sentries;
HYPRE_Int i;
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);
sentries = hypre_SStructPMatrixSEntries(pmatrix);
for (i = 0; i < nentries; i++)
{
sentries[i] = smap[entries[i]];
}
/* set values inside the grid */
hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values,
action, -1, 0);
/* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
if (action != 0)
{
/* AddTo/Get */
hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
hypre_Index varoffset;
HYPRE_Int done = 0;
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
hypre_ForBoxI(i, grid_boxes)
{
box = hypre_BoxArrayBox(grid_boxes, i);
if (hypre_IndexInBox(index, box))
{
done = 1;
break;
}
}
if (!done)
{
grow_box = hypre_BoxCreate(hypre_BoxArrayNDim(grid_boxes));
hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
hypre_SStructPGridNDim(pgrid), varoffset);
hypre_ForBoxI(i, grid_boxes)
{
box = hypre_BoxArrayBox(grid_boxes, i);
hypre_CopyBox(box, grow_box);
hypre_BoxGrowByIndex(grow_box, varoffset);
if (hypre_IndexInBox(index, grow_box))
{
hypre_StructMatrixSetValues(smatrix, index, nentries, sentries,
values, action, i, 1);
break;
}
}
hypre_BoxDestroy(grow_box);
}
}
else
{
/* Set */
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
hypre_ForBoxI(i, grid_boxes)
{
box = hypre_BoxArrayBox(grid_boxes, i);
if (!hypre_IndexInBox(index, box))
{
hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1);
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix,
hypre_Index ilower,
hypre_Index iupper,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructPMatrixNDim(pmatrix);
hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_StructMatrix *smatrix;
hypre_BoxArray *grid_boxes;
hypre_Box *box;
hypre_Box *value_box;
HYPRE_Int *sentries;
HYPRE_Int i, j;
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);
box = hypre_BoxCreate(hypre_StructMatrixNDim(smatrix));
hypre_CopyIndex(ilower, hypre_BoxIMin(box));
hypre_CopyIndex(iupper, hypre_BoxIMax(box));
value_box = box;
sentries = hypre_SStructPMatrixSEntries(pmatrix);
for (i = 0; i < nentries; i++)
{
sentries[i] = smap[entries[i]];
}
/* set values inside the grid */
hypre_StructMatrixSetBoxValues(smatrix, box, value_box, nentries, sentries,
values, action, -1, 0);
/* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
if (action != 0)
{
/* AddTo/Get */
hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
hypre_Index varoffset;
hypre_BoxArray *left_boxes, *done_boxes, *temp_boxes;
hypre_Box *left_box, *done_box, *int_box;
hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
hypre_SStructPGridNDim(pgrid), varoffset);
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
left_boxes = hypre_BoxArrayCreate(1, ndim);
done_boxes = hypre_BoxArrayCreate(2, ndim);
temp_boxes = hypre_BoxArrayCreate(0, ndim);
/* done_box always points to the first box in done_boxes */
done_box = hypre_BoxArrayBox(done_boxes, 0);
/* int_box always points to the second box in done_boxes */
int_box = hypre_BoxArrayBox(done_boxes, 1);
hypre_CopyBox(box, hypre_BoxArrayBox(left_boxes, 0));
hypre_BoxArraySetSize(left_boxes, 1);
hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes);
hypre_BoxArraySetSize(done_boxes, 0);
hypre_ForBoxI(i, grid_boxes)
{
hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes);
hypre_BoxArraySetSize(done_boxes, 1);
hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box);
hypre_BoxGrowByIndex(done_box, varoffset);
hypre_ForBoxI(j, left_boxes)
{
left_box = hypre_BoxArrayBox(left_boxes, j);
hypre_IntersectBoxes(left_box, done_box, int_box);
hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box,
nentries, sentries,
values, action, i, 1);
}
}
hypre_BoxArrayDestroy(left_boxes);
hypre_BoxArrayDestroy(done_boxes);
hypre_BoxArrayDestroy(temp_boxes);
}
else
{
/* Set */
hypre_BoxArray *diff_boxes;
hypre_Box *grid_box, *diff_box;
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
diff_boxes = hypre_BoxArrayCreate(0, ndim);
hypre_ForBoxI(i, grid_boxes)
{
grid_box = hypre_BoxArrayBox(grid_boxes, i);
hypre_BoxArraySetSize(diff_boxes, 0);
hypre_SubtractBoxes(box, grid_box, diff_boxes);
hypre_ForBoxI(j, diff_boxes)
{
diff_box = hypre_BoxArrayBox(diff_boxes, j);
hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries,
i, 1);
}
}
hypre_BoxArrayDestroy(diff_boxes);
}
hypre_BoxDestroy(box);
return hypre_error_flag;
}
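/*--------------------------------------------------------------------------
 * Illustrative sketch (not part of the library): how the action codes above
 * are reached from the public sstruct interface.  A, part, ilower, iupper,
 * var, nentries, entries and values stand in for a user's own objects.
 *
 *    HYPRE_SStructMatrixSetBoxValues(A, part, ilower, iupper, var,
 *                                    nentries, entries, values);    (action = 0)
 *    HYPRE_SStructMatrixAddToBoxValues(A, part, ilower, iupper, var,
 *                                      nentries, entries, values);  (action > 0)
 *    HYPRE_SStructMatrixGetBoxValues(A, part, ilower, iupper, var,
 *                                    nentries, entries, values);    (action < 0)
 *--------------------------------------------------------------------------*/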
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix )
{
hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
HYPRE_Int ndim = hypre_SStructPGridNDim(pgrid);
HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid);
hypre_StructMatrix *smatrix;
hypre_Index varoffset;
HYPRE_Int num_ghost[2*HYPRE_MAXDIM];
hypre_StructGrid *sgrid;
HYPRE_Int vi, vj, d;
hypre_CommInfo *comm_info;
hypre_CommPkg *comm_pkg;
hypre_CommHandle *comm_handle;
/* if values already accumulated, just return */
if (hypre_SStructPMatrixAccumulated(pmatrix))
{
return hypre_error_flag;
}
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
sgrid = hypre_StructMatrixGrid(smatrix);
/* assumes vi and vj vartypes are the same */
hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset);
for (d = 0; d < ndim; d++)
{
num_ghost[2*d] = num_ghost[2*d+1] = hypre_IndexD(varoffset, d);
}
/* accumulate values from AddTo */
hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info);
hypre_CommPkgCreate(comm_info,
hypre_StructMatrixDataSpace(smatrix),
hypre_StructMatrixDataSpace(smatrix),
hypre_StructMatrixNumValues(smatrix), NULL, 1,
hypre_StructMatrixComm(smatrix),
&comm_pkg);
hypre_InitializeCommunication(comm_pkg,
hypre_StructMatrixData(smatrix),
hypre_StructMatrixData(smatrix),
1, 0, &comm_handle);
hypre_FinalizeCommunication(comm_handle);
hypre_CommInfoDestroy(comm_info);
hypre_CommPkgDestroy(comm_pkg);
}
}
}
hypre_SStructPMatrixAccumulated(pmatrix) = 1;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix )
{
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
hypre_StructMatrix *smatrix;
HYPRE_Int vi, vj;
hypre_SStructPMatrixAccumulate(pmatrix);
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
hypre_StructMatrixClearGhostValues(smatrix);
hypre_StructMatrixAssemble(smatrix);
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix,
HYPRE_Int var,
HYPRE_Int to_var,
HYPRE_Int symmetric )
{
HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix);
HYPRE_Int vstart = var;
HYPRE_Int vsize = 1;
HYPRE_Int tstart = to_var;
HYPRE_Int tsize = 1;
HYPRE_Int v, t;
if (var == -1)
{
vstart = 0;
vsize = hypre_SStructPMatrixNVars(pmatrix);
}
if (to_var == -1)
{
tstart = 0;
tsize = hypre_SStructPMatrixNVars(pmatrix);
}
for (v = vstart; v < (vstart + vsize); v++)
{
for (t = tstart; t < (tstart + tsize); t++)
{
pmsymmetric[v][t] = symmetric;
}
}
return hypre_error_flag;
}
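/*--------------------------------------------------------------------------
 * Example (illustrative only): var = -1 and to_var = -1 act as wildcards
 * covering every variable, so this call marks all blocks symmetric:
 *
 *    hypre_SStructPMatrixSetSymmetric(pmatrix, -1, -1, 1);
 *--------------------------------------------------------------------------*/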
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixPrint( const char *filename,
hypre_SStructPMatrix *pmatrix,
HYPRE_Int all )
{
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
hypre_StructMatrix *smatrix;
HYPRE_Int vi, vj;
char new_filename[255];
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
hypre_sprintf(new_filename, "%s.%02d.%02d", filename, vi, vj);
hypre_StructMatrixPrint(new_filename, smatrix, all);
}
}
}
return hypre_error_flag;
}
/*==========================================================================
* SStructUMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int nparts = hypre_SStructGraphNParts(graph);
hypre_SStructPGrid **pgrids = hypre_SStructGraphPGrids(graph);
hypre_SStructStencil ***stencils = hypre_SStructGraphStencils(graph);
HYPRE_Int nUventries = hypre_SStructGraphNUVEntries(graph);
HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph);
hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph);
HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid);
hypre_StructGrid *sgrid;
hypre_SStructStencil *stencil;
HYPRE_Int *split;
HYPRE_Int nvars;
HYPRE_Int nrows, rowstart, nnzs;
HYPRE_Int part, var, entry, b, m, mi;
HYPRE_Int *row_sizes;
HYPRE_Int max_row_size;
hypre_BoxArray *boxes;
hypre_Box *box;
hypre_Box *ghost_box;
hypre_IndexRef start;
hypre_Index loop_size, stride;
HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
{
rowstart = hypre_SStructGridGhstartRank(grid);
nrows = hypre_SStructGridGhlocalSize(grid);
}
else /* matrix_type == HYPRE_PARCSR */
{
rowstart = hypre_SStructGridStartRank(grid);
nrows = hypre_SStructGridLocalSize(grid);
}
/* set row sizes */
m = 0;
max_row_size = 0;
ghost_box = hypre_BoxCreate(ndim);
row_sizes = hypre_CTAlloc(HYPRE_Int, nrows);
hypre_SetIndex(stride, 1);
for (part = 0; part < nparts; part++)
{
nvars = hypre_SStructPGridNVars(pgrids[part]);
for (var = 0; var < nvars; var++)
{
sgrid = hypre_SStructPGridSGrid(pgrids[part], var);
stencil = stencils[part][var];
split = hypre_SStructMatrixSplit(matrix, part, var);
nnzs = 0;
for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++)
{
if (split[entry] == -1)
{
nnzs++;
}
}
#if 0
/* TODO: For now, assume stencil is full/complete */
if (hypre_SStructMatrixSymmetric(matrix))
{
nnzs = 2*nnzs - 1;
}
#endif
boxes = hypre_StructGridBoxes(sgrid);
hypre_ForBoxI(b, boxes)
{
box = hypre_BoxArrayBox(boxes, b);
hypre_CopyBox(box, ghost_box);
if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
{
hypre_BoxGrowByArray(ghost_box, hypre_StructGridNumGhost(sgrid));
}
start = hypre_BoxIMin(box);
hypre_BoxGetSize(box, loop_size);
hypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size,
ghost_box, start, stride, mi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop1For(mi)
{
row_sizes[m+mi] = nnzs;
}
hypre_BoxLoop1End(mi);
m += hypre_BoxVolume(ghost_box);
}
max_row_size = hypre_max(max_row_size, nnzs);
if (nvneighbors[part][var])
{
max_row_size =
hypre_max(max_row_size, hypre_SStructStencilSize(stencil));
}
}
}
hypre_BoxDestroy(ghost_box);
/* GEC0902: for each UVEntry, figure out how many extra columns we need to
 * add to the row sizes */
/* RDF: THREAD? */
for (entry = 0; entry < nUventries; entry++)
{
mi = iUventries[entry];
m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart;
if ((m > -1) && (m < nrows))
{
row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]);
max_row_size = hypre_max(max_row_size, row_sizes[m]);
}
}
/* ZTODO: Update row_sizes based on neighbor off-part couplings */
HYPRE_IJMatrixSetRowSizes(ijmatrix, (const HYPRE_Int *) row_sizes);
hypre_TFree(row_sizes);
hypre_SStructMatrixTmpColCoords(matrix) =
hypre_CTAlloc(HYPRE_Int, max_row_size);
hypre_SStructMatrixTmpCoeffs(matrix) =
hypre_CTAlloc(HYPRE_Complex, max_row_size);
HYPRE_IJMatrixInitialize(ijmatrix);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager - here we need to check the
* neighbor box manager also
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix,
HYPRE_Int part,
hypre_Index index,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph);
hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_Index *shape = hypre_SStructStencilShape(stencil);
HYPRE_Int size = hypre_SStructStencilSize(stencil);
hypre_IndexRef offset;
hypre_Index to_index;
hypre_SStructUVEntry *Uventry;
hypre_BoxManEntry *boxman_entry;
hypre_SStructBoxManInfo *entry_info;
HYPRE_Int row_coord;
HYPRE_Int *col_coords;
HYPRE_Int ncoeffs;
HYPRE_Complex *coeffs;
HYPRE_Int i, entry, Uverank;
HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix);
hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry);
/* if not local, check neighbors */
if (boxman_entry == NULL)
hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry);
if (boxman_entry == NULL)
{
hypre_error_in_arg(1);
hypre_error_in_arg(2);
hypre_error_in_arg(3);
return hypre_error_flag;
}
else
{
hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info);
}
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index,
&row_coord, matrix_type);
col_coords = hypre_SStructMatrixTmpColCoords(matrix);
coeffs = hypre_SStructMatrixTmpCoeffs(matrix);
ncoeffs = 0;
for (i = 0; i < nentries; i++)
{
entry = entries[i];
if (entry < size)
{
/* stencil entries */
offset = shape[entry];
hypre_AddIndexes(index, offset, ndim, to_index);
hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry],
&boxman_entry);
/* if not local, check neighbors */
if (boxman_entry == NULL)
hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index,
vars[entry], &boxman_entry);
if (boxman_entry != NULL)
{
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index,
&col_coords[ncoeffs],matrix_type);
coeffs[ncoeffs] = values[i];
ncoeffs++;
}
}
else
{
/* non-stencil entries */
entry -= size;
hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank);
if (Uverank > -1)
{
Uventry = hypre_SStructGraphUVEntry(graph, Uverank);
col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry);
coeffs[ncoeffs] = values[i];
ncoeffs++;
}
}
}
if (action > 0)
{
HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord,
(const HYPRE_Int *) col_coords,
(const HYPRE_Complex *) coeffs);
}
else if (action > -1)
{
HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord,
(const HYPRE_Int *) col_coords,
(const HYPRE_Complex *) coeffs);
}
else
{
HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord,
col_coords, values);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Note: Entries must all be of type stencil or non-stencil, but not both.
*
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* 9/09 - AB: modified to use the box manager - here we need to check the
* neighbor box manager also
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix,
HYPRE_Int part,
hypre_Index ilower,
hypre_Index iupper,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph);
hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_Index *shape = hypre_SStructStencilShape(stencil);
HYPRE_Int size = hypre_SStructStencilSize(stencil);
hypre_IndexRef offset;
hypre_BoxManEntry **boxman_entries;
HYPRE_Int nboxman_entries;
hypre_BoxManEntry **boxman_to_entries;
HYPRE_Int nboxman_to_entries;
HYPRE_Int nrows;
HYPRE_Int *ncols;
HYPRE_Int *rows;
HYPRE_Int *cols;
HYPRE_Complex *ijvalues;
hypre_Box *box, *vbox;
hypre_Box *to_box;
hypre_Box *map_box;
hypre_Box *int_box;
hypre_Index index, stride, loop_size;
hypre_IndexRef start;
hypre_Index rs, cs;
HYPRE_Int row_base, col_base;
HYPRE_Int d, ei, entry, ii, jj, i, mi, vi;
HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix);
box = hypre_BoxCreate(ndim);
vbox = hypre_BoxCreate(ndim);
hypre_BoxSetExtents(vbox, ilower, iupper);
/*------------------------------------------
* all stencil entries
*------------------------------------------*/
if (entries[0] < size)
{
to_box = hypre_BoxCreate(ndim);
map_box = hypre_BoxCreate(ndim);
int_box = hypre_BoxCreate(ndim);
nrows = hypre_BoxVolume(vbox)*nentries;
ncols = hypre_CTAlloc(HYPRE_Int, nrows);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < nrows; i++)
{
ncols[i] = 1;
}
rows = hypre_CTAlloc(HYPRE_Int, nrows);
cols = hypre_CTAlloc(HYPRE_Int, nrows);
ijvalues = hypre_CTAlloc(HYPRE_Complex, nrows);
hypre_SetIndex(stride, 1);
hypre_SStructGridIntersect(grid, part, var, vbox, -1,
&boxman_entries, &nboxman_entries);
for (ii = 0; ii < nboxman_entries; ii++)
{
hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type);
hypre_CopyBox(vbox, box);
hypre_BoxManEntryGetExtents(boxman_entries[ii],
hypre_BoxIMin(map_box),
hypre_BoxIMax(map_box));
hypre_IntersectBoxes(box, map_box, int_box);
hypre_CopyBox(int_box, box);
nrows = 0;
for (ei = 0; ei < nentries; ei++)
{
entry = entries[ei];
hypre_CopyBox(box, to_box);
offset = shape[entry];
hypre_BoxShiftPos(to_box, offset);
hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1,
&boxman_to_entries, &nboxman_to_entries);
for (jj = 0; jj < nboxman_to_entries; jj++)
{
hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj],
cs, matrix_type);
hypre_BoxManEntryGetExtents(boxman_to_entries[jj],
hypre_BoxIMin(map_box),
hypre_BoxIMax(map_box));
hypre_IntersectBoxes(to_box, map_box, int_box);
hypre_CopyIndex(hypre_BoxIMin(int_box), index);
hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj],
index, &col_base, matrix_type);
hypre_BoxShiftNeg(int_box, offset);
hypre_CopyIndex(hypre_BoxIMin(int_box), index);
hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii],
index, &row_base, matrix_type);
start = hypre_BoxIMin(int_box);
hypre_BoxGetSize(int_box, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
int_box, start, stride, mi,
vbox, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi,vi,index,d) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(mi, vi)
{
hypre_BoxLoopGetIndex(index);
rows[nrows + mi] = row_base;
cols[nrows + mi] = col_base;
for (d = 0; d < ndim; d++)
{
rows[nrows + mi] += index[d]*rs[d];
cols[nrows + mi] += index[d]*cs[d];
}
ijvalues[nrows + mi] = values[ei + vi*nentries];
}
hypre_BoxLoop2End(mi, vi);
nrows += hypre_BoxVolume(int_box);
} /* end loop through boxman to entries */
hypre_TFree(boxman_to_entries);
} /* end of ei nentries loop */
/*------------------------------------------
* set IJ values one stencil entry at a time
*------------------------------------------*/
if (action > 0)
{
HYPRE_IJMatrixAddToValues(ijmatrix, nrows, ncols,
(const HYPRE_Int *) rows,
(const HYPRE_Int *) cols,
(const HYPRE_Complex *) ijvalues);
}
else if (action > -1)
{
HYPRE_IJMatrixSetValues(ijmatrix, nrows, ncols,
(const HYPRE_Int *) rows,
(const HYPRE_Int *) cols,
(const HYPRE_Complex *) ijvalues);
}
else
{
HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values);
}
} /* end loop through boxman entries */
hypre_TFree(boxman_entries);
hypre_TFree(ncols);
hypre_TFree(rows);
hypre_TFree(cols);
hypre_TFree(ijvalues);
hypre_BoxDestroy(to_box);
hypre_BoxDestroy(map_box);
hypre_BoxDestroy(int_box);
}
/*------------------------------------------
* non-stencil entries
*------------------------------------------*/
else
{
/* RDF: THREAD (Check safety on UMatrixSetValues call) */
hypre_BoxGetSize(vbox, loop_size);
hypre_BoxLoop0Begin(ndim, loop_size);
hypre_BoxLoopSetOneBlock();
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(index);
hypre_AddIndexes(index, hypre_BoxIMin(vbox), ndim, index);
hypre_SStructUMatrixSetValues(matrix, part, index, var,
nentries, entries, values, action);
values += nentries;
}
hypre_BoxLoop0End();
}
hypre_BoxDestroy(box);
hypre_BoxDestroy(vbox);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix )
{
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
HYPRE_IJMatrixAssemble(ijmatrix);
HYPRE_IJMatrixGetObject(
ijmatrix, (void **) &hypre_SStructMatrixParCSRMatrix(matrix));
return hypre_error_flag;
}
/*==========================================================================
* SStructMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixRef( hypre_SStructMatrix *matrix,
hypre_SStructMatrix **matrix_ref )
{
hypre_SStructMatrixRefCount(matrix)++;
*matrix_ref = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix,
HYPRE_Int part,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Int *nSentries_ptr,
HYPRE_Int **Sentries_ptr,
HYPRE_Int *nUentries_ptr,
HYPRE_Int **Uentries_ptr )
{
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
HYPRE_Int *split = hypre_SStructMatrixSplit(matrix, part, var);
hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
HYPRE_Int entry;
HYPRE_Int i;
HYPRE_Int nSentries = 0;
HYPRE_Int *Sentries = hypre_SStructMatrixSEntries(matrix);
HYPRE_Int nUentries = 0;
HYPRE_Int *Uentries = hypre_SStructMatrixUEntries(matrix);
for (i = 0; i < nentries; i++)
{
entry = entries[i];
if (entry < hypre_SStructStencilSize(stencil))
{
/* stencil entries */
if (split[entry] > -1)
{
Sentries[nSentries] = split[entry];
nSentries++;
}
else
{
Uentries[nUentries] = entry;
nUentries++;
}
}
else
{
/* non-stencil entries */
Uentries[nUentries] = entry;
nUentries++;
}
}
*nSentries_ptr = nSentries;
*Sentries_ptr = Sentries;
*nUentries_ptr = nUentries;
*Uentries_ptr = Uentries;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetValues( HYPRE_SStructMatrix matrix,
HYPRE_Int part,
HYPRE_Int *index,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid);
HYPRE_Int *Sentries;
HYPRE_Int *Uentries;
HYPRE_Int nSentries;
HYPRE_Int nUentries;
hypre_SStructPMatrix *pmatrix;
hypre_Index cindex;
hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
&nSentries, &Sentries,
&nUentries, &Uentries);
hypre_CopyToCleanIndex(index, ndim, cindex);
/* S-matrix */
if (nSentries > 0)
{
pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
hypre_SStructPMatrixSetValues(pmatrix, cindex, var,
nSentries, Sentries, values, action);
/* put inter-part couplings in UMatrix and zero them out in PMatrix
* (possibly in ghost zones) */
if (nvneighbors[part][var] > 0)
{
hypre_SStructMatrixSetInterPartValues(matrix, part, cindex, cindex, var,
nSentries, entries, values, action);
}
}
/* U-matrix */
if (nUentries > 0)
{
hypre_SStructUMatrixSetValues(matrix, part, cindex, var,
nUentries, Uentries, values, action);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix matrix,
HYPRE_Int part,
HYPRE_Int *ilower,
HYPRE_Int *iupper,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid);
HYPRE_Int *Sentries;
HYPRE_Int *Uentries;
HYPRE_Int nSentries;
HYPRE_Int nUentries;
hypre_SStructPMatrix *pmatrix;
hypre_Index cilower;
hypre_Index ciupper;
hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
&nSentries, &Sentries,
&nUentries, &Uentries);
hypre_CopyToCleanIndex(ilower, ndim, cilower);
hypre_CopyToCleanIndex(iupper, ndim, ciupper);
/* S-matrix */
if (nSentries > 0)
{
pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
hypre_SStructPMatrixSetBoxValues(pmatrix, cilower, ciupper, var,
nSentries, Sentries, values, action);
/* put inter-part couplings in UMatrix and zero them out in PMatrix
* (possibly in ghost zones) */
if (nvneighbors[part][var] > 0)
{
hypre_SStructMatrixSetInterPartValues(matrix, part, cilower, ciupper, var,
nSentries, entries, values, action);
}
}
/* U-matrix */
if (nUentries > 0)
{
hypre_SStructUMatrixSetBoxValues(matrix, part, cilower, ciupper, var,
nUentries, Uentries, values, action);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in
* ghost zones). Assumes that all entries are stencil entries.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix matrix,
HYPRE_Int part,
hypre_Index ilower,
hypre_Index iupper,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
hypre_SStructPMatrix *pmatrix;
hypre_SStructPGrid *pgrid;
hypre_SStructStencil *stencil;
hypre_Index *shape;
HYPRE_Int *smap;
HYPRE_Int *vars, frvartype, tovartype;
hypre_StructMatrix *smatrix;
hypre_Box *box, *vbox, *ibox0, *ibox1, *tobox, *frbox;
hypre_Index stride, loop_size;
hypre_IndexRef offset, start;
hypre_BoxManEntry **frentries, **toentries;
hypre_SStructBoxManInfo *frinfo, *toinfo;
HYPRE_Complex *tvalues = NULL;
HYPRE_Int nfrentries, ntoentries, frpart, topart;
HYPRE_Int entry, sentry, ei, fri, toi, vi, mi;
pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
pgrid = hypre_SStructPMatrixPGrid(pmatrix);
frvartype = hypre_SStructPGridVarType(pgrid, var);
box = hypre_BoxCreate(ndim);
vbox = hypre_BoxCreate(ndim);
ibox0 = hypre_BoxCreate(ndim);
ibox1 = hypre_BoxCreate(ndim);
tobox = hypre_BoxCreate(ndim);
frbox = hypre_BoxCreate(ndim);
stencil = hypre_SStructPMatrixStencil(pmatrix, var);
smap = hypre_SStructPMatrixSMap(pmatrix, var);
shape = hypre_SStructStencilShape(stencil);
vars = hypre_SStructStencilVars(stencil);
hypre_BoxSetExtents(vbox, ilower, iupper);
hypre_SetIndex(stride, 1);
for (ei = 0; ei < nentries; ei++)
{
entry = entries[ei];
sentry = smap[entry];
offset = shape[entry];
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]);
tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]);
/* shift box in the stencil offset direction */
hypre_BoxSetExtents(box, ilower, iupper);
hypre_AddIndexes(hypre_BoxIMin(box), offset, ndim, hypre_BoxIMin(box));
hypre_AddIndexes(hypre_BoxIMax(box), offset, ndim, hypre_BoxIMax(box));
/* get "to" entries */
hypre_SStructGridIntersect(grid, part, vars[entry], box, -1,
&toentries, &ntoentries);
for (toi = 0; toi < ntoentries; toi++)
{
hypre_BoxManEntryGetExtents(
toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox));
hypre_IntersectBoxes(box, tobox, ibox0);
if (hypre_BoxVolume(ibox0))
{
hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart);
/* shift ibox0 back */
hypre_SubtractIndexes(hypre_BoxIMin(ibox0), offset, ndim,
hypre_BoxIMin(ibox0));
hypre_SubtractIndexes(hypre_BoxIMax(ibox0), offset, ndim,
hypre_BoxIMax(ibox0));
/* get "from" entries */
hypre_SStructGridIntersect(grid, part, var, ibox0, -1,
&frentries, &nfrentries);
for (fri = 0; fri < nfrentries; fri++)
{
/* don't set couplings within the same part, except possibly for
 * cell data (to simplify periodic conditions for users) */
hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart);
if (topart == frpart)
{
if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) ||
(tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) )
{
continue;
}
hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo);
hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo);
if ( hypre_SStructBoxManInfoType(frinfo) ==
hypre_SStructBoxManInfoType(toinfo) )
{
continue;
}
}
hypre_BoxManEntryGetExtents(
frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox));
hypre_IntersectBoxes(ibox0, frbox, ibox1);
if (hypre_BoxVolume(ibox1))
{
tvalues =
hypre_TReAlloc(tvalues, HYPRE_Complex, hypre_BoxVolume(ibox1));
if (action >= 0)
{
/* set or add */
/* copy values into tvalues */
start = hypre_BoxIMin(ibox1);
hypre_BoxGetSize(ibox1, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
ibox1, start, stride, mi,
vbox, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi,vi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(mi, vi)
{
tvalues[mi] = values[ei + vi*nentries];
}
hypre_BoxLoop2End(mi, vi);
/* put values into UMatrix */
hypre_SStructUMatrixSetBoxValues(
matrix, part, hypre_BoxIMin(ibox1), hypre_BoxIMax(ibox1),
var, 1, &entry, tvalues, action);
/* zero out values in PMatrix (possibly in ghost) */
hypre_StructMatrixClearBoxValues(
smatrix, ibox1, 1, &sentry, -1, 1);
}
else
{
/* get */
/* get values from UMatrix */
hypre_SStructUMatrixSetBoxValues(
matrix, part, hypre_BoxIMin(ibox1), hypre_BoxIMax(ibox1),
var, 1, &entry, tvalues, action);
/* copy tvalues into values */
start = hypre_BoxIMin(ibox1);
hypre_BoxGetSize(ibox1, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
ibox1, start, stride, mi,
vbox, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi,vi) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(mi, vi)
{
values[ei + vi*nentries] = tvalues[mi];
}
hypre_BoxLoop2End(mi, vi);
} /* end if action */
} /* end if nonzero ibox1 */
} /* end of "from" boxman entries loop */
hypre_TFree(frentries);
} /* end if nonzero ibox0 */
} /* end of "to" boxman entries loop */
hypre_TFree(toentries);
} /* end of entries loop */
hypre_BoxDestroy(box);
hypre_BoxDestroy(vbox);
hypre_BoxDestroy(ibox0);
hypre_BoxDestroy(ibox1);
hypre_BoxDestroy(tobox);
hypre_BoxDestroy(frbox);
hypre_TFree(tvalues);
return hypre_error_flag;
}
|
GB_unaryop__ainv_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_fp64
// op(A') function: GB_tran__ainv_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
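// For reference, with the definitions above GB_CAST_OP(pC,pA) expands to
// (leaving GB_CAST_UNSIGNED, defined in GB.h, unexpanded):
//
//      double aij = Ax [pA] ;
//      uint8_t x ; GB_CAST_UNSIGNED (x, aij, 8) ;
//      Cx [pC] = -x ;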
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_fp64
(
uint8_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mkldnn_graph.h | // Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include <memory>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include "ie_parallel.hpp"
#include "mkldnn_memory.h"
#include "config.h"
#include "perf_count.h"
#include "mkldnn_dims.h"
#include "mean_image.h"
#include "mkldnn_node.h"
#include "mkldnn_edge.h"
#include "mkldnn_extension_utils.h"
#include "mkldnn_streams.h"
namespace MKLDNNPlugin {
class MKLDNNGraph {
public:
typedef std::shared_ptr<MKLDNNGraph> Ptr;
enum Status {
NotReady = 0,
Ready = 1,
};
MKLDNNGraph(): status(NotReady), eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)) {}
Status GetStatus() {
return status;
}
bool IsReady() {
return (GetStatus() == Ready);
}
void setConfig(const Config &cfg);
void setProperty(const std::map<std::string, std::string> &properties);
Config getProperty();
void getInputBlobs(InferenceEngine::BlobMap &in_map);
void getOutputBlobs(InferenceEngine::BlobMap &out_map);
void CreateGraph(const InferenceEngine::ICNNNetwork &network, const MKLDNNExtensionManager::Ptr& extMgr);
bool hasMeanImageFor(const std::string& name) {
return _meanImages.find(name) != _meanImages.end();
}
void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in);
void PullOutputData(InferenceEngine::BlobMap &out);
void Infer(int batch = -1);
std::vector<MKLDNNNodePtr>& GetNodes() {
return graphNodes;
}
std::vector<MKLDNNEdgePtr>& GetEdges() {
return graphEdges;
}
std::vector<MKLDNNNodePtr>& GetOutputNodes() {
return outputNodes;
}
mkldnn::engine getEngine() const {
return eng;
}
void GetPerfData(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> &perfMap) const;
void RemoveDroppedNodes();
void RemoveDroppedEdges();
void DropNode(const MKLDNNNodePtr& node);
void CreateArena(int threads_per_stream) {
#if IE_THREAD == IE_THREAD_OMP
omp_set_num_threads(threads_per_stream);
#elif IE_THREAD == IE_THREAD_TBB
ptrArena = std::unique_ptr<tbb::task_arena>(new tbb::task_arena(threads_per_stream));
#endif
}
void CreateObserver(int _stream_id, int _threads_per_stream, int _pinning_step = 1) {
#if IE_THREAD == IE_THREAD_TBB
ptrObserver
= std::unique_ptr<tbb::task_scheduler_observer>(
new pinning_observer(*ptrArena.get(), _stream_id, _threads_per_stream, _pinning_step));
#else
cpu_set_t *process_mask = nullptr;
int ncpus = 0;
get_process_mask(ncpus, process_mask);
#if IE_THREAD == IE_THREAD_OMP
#pragma omp parallel for
for (int thread_index = 0; thread_index < _threads_per_stream; thread_index++) {
pin_thread_to_vacant_core(_stream_id * _threads_per_stream + thread_index, 1, ncpus, process_mask);
}
#elif IE_THREAD == IE_THREAD_SEQ
pin_thread_to_vacant_core(_stream_id * _threads_per_stream, 1, ncpus, process_mask);
#endif
CPU_FREE(process_mask);
#endif
}
protected:
MKLDNNNodePtr FindNodeWithName(const std::string& name) const;
void VisitNode(MKLDNNNodePtr node, std::vector<MKLDNNNodePtr>& sortedNodes);
void SortTopologically();
void ForgetGraphData() {
status = NotReady;
eng = mkldnn::engine(mkldnn::engine::kind::cpu, 0);
inputNodes.clear();
outputNodes.clear();
graphNodes.clear();
graphEdges.clear();
_meanImages.clear();
}
Status status;
Config config;
MKLDNNMemoryPtr memWorkspace;
std::map<std::string, MKLDNNNodePtr> inputNodes;
std::vector<MKLDNNNodePtr> outputNodes;
std::vector<MKLDNNNodePtr> graphNodes;
std::vector<MKLDNNEdgePtr> graphEdges;
std::map<std::string, MeanImage> _meanImages;
#if IE_THREAD == IE_THREAD_TBB
std::unique_ptr<tbb::task_arena> ptrArena;
std::unique_ptr<tbb::task_scheduler_observer> ptrObserver;
#endif
mkldnn::engine eng;
void InitNodes();
void InitEdges();
void Allocate();
void AllocateWithReuse();
void CreatePrimitives();
void do_before(const std::string &dir, const MKLDNNNodePtr &node);
void do_after(const std::string &dir, const MKLDNNNodePtr &node);
friend class MKLDNNInferRequest;
friend class MKLDNNGraphlessInferRequest;
friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
private:
void dumpToDotFile(std::string file) const;
struct ParsedLayer {
MKLDNNNodePtr parent;
InferenceEngine::CNNLayerPtr cnnLayer;
size_t outIdx;
};
void ParseNode(const InferenceEngine::CNNLayerPtr& cnnLayer, MKLDNNNodePtr& parent,
const MKLDNNExtensionManager::Ptr& extMgr, size_t outIdx, std::vector<ParsedLayer>& layers);
};
class MKLDNNExecNetwork: public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
typedef std::shared_ptr<MKLDNNExecNetwork> Ptr;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network, const Config &cfg,
const MKLDNNExtensionManager::Ptr& extMgr);
~MKLDNNExecNetwork() {
graphs.clear();
extensionManager.reset();
}
void setProperty(const std::map<std::string, std::string> &properties);
protected:
std::vector<MKLDNNGraph::Ptr> graphs;
MKLDNNExtensionManager::Ptr extensionManager;
bool CanProcessDynBatch(const InferenceEngine::ICNNNetwork &network) const;
};
} // namespace MKLDNNPlugin
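// Illustrative sketch (not part of this header): the intended call sequence,
// based only on the API declared above; cfg, network, extMgr and the blob
// variables are placeholders.
//
//   MKLDNNPlugin::MKLDNNGraph graph;
//   graph.setConfig(cfg);
//   graph.CreateGraph(network, extMgr);          // build nodes and edges
//   if (graph.IsReady()) {
//       graph.PushInputData("data", inputBlob);  // feed one named input
//       graph.Infer();                           // run all primitives
//       graph.PullOutputData(outputBlobs);       // collect named outputs
//   }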
|
common.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_COMMON_H_
#define LIGHTGBM_UTILS_COMMON_H_
#if ((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))
#include <LightGBM/utils/common_legacy_solaris.h>
#endif
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <limits>
#include <string>
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iomanip>
#include <iterator>
#include <map>
#include <memory>
#include <sstream>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
#define FMT_HEADER_ONLY
#include "../../../external_libs/fmt/include/fmt/format.h"
#endif
#include "../../../external_libs/fast_double_parser/include/fast_double_parser.h"
#ifdef _MSC_VER
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
#endif
#if defined(_MSC_VER)
#include <malloc.h>
#elif MM_MALLOC
#include <mm_malloc.h>
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
// https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html
#elif defined(__GNUC__) && defined(HAVE_MALLOC_H)
#include <malloc.h>
#define _mm_malloc(a, b) memalign(b, a)
#define _mm_free(a) free(a)
#else
#include <stdlib.h>
#define _mm_malloc(a, b) malloc(a)
#define _mm_free(a) free(a)
#endif
namespace LightGBM {
namespace Common {
/*!
* Imbues the stream with the C locale.
*/
static void C_stringstream(std::stringstream &ss) {
ss.imbue(std::locale::classic());
}
inline static char tolower(char in) {
if (in <= 'Z' && in >= 'A')
return in - ('Z' - 'z');
return in;
}
inline static std::string Trim(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
return str;
}
inline static std::string RemoveQuotationSymbol(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of("'\"") + 1);
str.erase(0, str.find_first_not_of("'\""));
return str;
}
inline static bool StartsWith(const std::string& str, const std::string& prefix) {
return str.substr(0, prefix.size()) == prefix;
}
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == delimiter) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
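// Example: Split("a,,b,c", ',') yields {"a", "b", "c"}; empty tokens from
// consecutive, leading, or trailing delimiters are dropped.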
inline static std::vector<std::string> SplitBrackets(const char* c_str, char left_delimiter, char right_delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
bool open = false;
while (pos < str.length()) {
if (str[pos] == left_delimiter) {
open = true;
++pos;
i = pos;
} else if (str[pos] == right_delimiter && open) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
open = false;
++pos;
} else {
++pos;
}
}
return ret;
}
inline static std::vector<std::string> SplitLines(const char* c_str) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == '\n' || str[pos] == '\r') {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
// skip the line endings
while (str[pos] == '\n' || str[pos] == '\r') ++pos;
// new begin
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
bool met_delimiters = false;
for (int j = 0; delimiters[j] != '\0'; ++j) {
if (str[pos] == delimiters[j]) {
met_delimiters = true;
break;
}
}
if (met_delimiters) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
int sign;
T value;
while (*p == ' ') {
++p;
}
sign = 1;
if (*p == '-') {
sign = -1;
++p;
} else if (*p == '+') {
++p;
}
for (value = 0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10 + (*p - '0');
}
*out = static_cast<T>(sign * value);
while (*p == ' ') {
++p;
}
return p;
}
template<typename T>
inline static double Pow(T base, int power) {
if (power < 0) {
return 1.0 / Pow(base, -power);
} else if (power == 0) {
return 1;
} else if (power % 2 == 0) {
return Pow(base*base, power / 2);
} else if (power % 3 == 0) {
return Pow(base*base*base, power / 3);
} else {
return base * Pow(base, power - 1);
}
}
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = NAN;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double right = 0.0;
int nn = 0;
++p;
while (*p >= '0' && *p <= '9') {
right = (*p - '0') + right * 10.0;
++nn;
++p;
}
value += right / Pow(10.0, nn);
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
tmp_str == std::string("null")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
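// Example (illustrative): with p = "  -1.5e2,x", Atof skips the spaces,
// parses the number, sets *out = -150.0, and returns a pointer to ",x".
// The tokens "na"/"nan"/"null" yield NAN; "inf"/"infinity" yield
// sign * 1e308, a finite stand-in for infinity.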
// Use fast_double_parser, and fall back to strtod if parsing fails, to parse a double.
inline static const char* AtofPrecise(const char* p, double* out) {
const char* end = fast_double_parser::parse_number(p, out);
if (end != nullptr) {
return end;
}
// Rare path: not in RFC 7159 format (possibly "inf", "nan", etc.); fall back to the standard library.
char* end2;
errno = 0; // This is required before calling strtod.
*out = std::strtod(p, &end2); // strtod is locale aware.
if (end2 == p) {
Log::Fatal("no conversion to double for: %s", p);
}
if (errno == ERANGE) {
Log::Fatal("convert to double got underflow or overflow: %s", p);
}
return end2;
}
inline static bool AtoiAndCheck(const char* p, int* out) {
const char* after = Atoi(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static bool AtofAndCheck(const char* p, double* out) {
const char* after = Atof(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static const char* SkipSpaceAndTab(const char* p) {
while (*p == ' ' || *p == '\t') {
++p;
}
return p;
}
inline static const char* SkipReturn(const char* p) {
while (*p == '\n' || *p == '\r' || *p == ' ') {
++p;
}
return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret(arr.size());
for (size_t i = 0; i < arr.size(); ++i) {
ret[i] = static_cast<T2>(arr[i]);
}
return ret;
}
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
Atoi(str.c_str(), &ret);
return ret;
}
};
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T>
inline static std::vector<std::vector<T>> StringToArrayofArrays(
const std::string& str, char left_bracket, char right_bracket, char delimiter) {
std::vector<std::string> strs = SplitBrackets(str.c_str(), left_bracket, right_bracket);
std::vector<std::vector<T>> ret;
for (const auto& s : strs) {
ret.push_back(StringToArray<T>(s, delimiter));
}
return ret;
}
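// Example: StringToArrayofArrays<int>("[1,2][3]", '[', ']', ',')
// yields {{1, 2}, {3}}.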
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), ' ');
CHECK_EQ(strs.size(), static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return Atoi(p, out);
}
};
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0f;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter, const bool force_C_locale = false) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
if (force_C_locale) {
C_stringstream(str_buf);
}
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[0];
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter, const bool force_C_locale) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
if (force_C_locale) {
C_stringstream(str_buf);
}
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << static_cast<int16_t>(strs[0]);
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << static_cast<int16_t>(strs[i]);
}
return str_buf.str();
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter, const bool force_C_locale = false) {
if (end - start <= 0) {
return std::string("");
}
start = std::min(start, static_cast<size_t>(strs.size()) - 1);
end = std::min(end, static_cast<size_t>(strs.size()));
std::stringstream str_buf;
if (force_C_locale) {
C_stringstream(str_buf);
}
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[start];
for (size_t i = start + 1; i < end; ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
inline static int64_t Pow2RoundUp(int64_t x) {
int64_t t = 1;
for (int i = 0; i < 64; ++i) {
if (t >= x) {
return t;
}
t <<= 1;
}
return 0;
}
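// Example: Pow2RoundUp(1) == 1 and Pow2RoundUp(33) == 64; for x > 2^62 the
// next power of two no longer fits in int64_t and the function returns 0.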
/*!
* \brief Do inplace softmax transformation on p_rec
* \param p_rec The input/output vector of the values.
*/
inline static void Softmax(std::vector<double>* p_rec) {
std::vector<double> &rec = *p_rec;
double wmax = rec[0];
for (size_t i = 1; i < rec.size(); ++i) {
wmax = std::max(rec[i], wmax);
}
double wsum = 0.0f;
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] = std::exp(rec[i] - wmax);
wsum += rec[i];
}
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] /= static_cast<double>(wsum);
}
}
inline static void Softmax(const double* input, double* output, int len) {
double wmax = input[0];
for (int i = 1; i < len; ++i) {
wmax = std::max(input[i], wmax);
}
double wsum = 0.0f;
for (int i = 0; i < len; ++i) {
output[i] = std::exp(input[i] - wmax);
wsum += output[i];
}
for (int i = 0; i < len; ++i) {
output[i] /= static_cast<double>(wsum);
}
}
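// Both overloads compute softmax(x)_i = exp(x_i - m) / sum_j exp(x_j - m)
// with m = max_j x_j; subtracting the maximum first avoids overflow in
// std::exp without changing the mathematical result.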
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
std::vector<const T*> ret;
for (auto t = input.begin(); t != input.end(); ++t) {
ret.push_back(t->get());
}
return ret;
}
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
auto& ref_key = *keys;
auto& ref_value = *values;
for (size_t i = start; i < keys->size(); ++i) {
arr.emplace_back(ref_key[i], ref_value[i]);
}
if (!is_reverse) {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first < b.first;
});
} else {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first > b.first;
});
}
for (size_t i = start; i < arr.size(); ++i) {
ref_key[i] = arr[i].first;
ref_value[i] = arr[i].second;
}
}
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
std::vector<T*> ptr(data->size());
auto& ref_data = *data;
for (size_t i = 0; i < data->size(); ++i) {
ptr[i] = ref_data[i].data();
}
return ptr;
}
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
std::vector<int> ret(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ret[i] = static_cast<int>(data[i].size());
}
return ret;
}
inline static double AvoidInf(double x) {
if (std::isnan(x)) {
return 0.0;
} else if (x >= 1e300) {
return 1e300;
} else if (x <= -1e300) {
return -1e300;
} else {
return x;
}
}
inline static float AvoidInf(float x) {
if (std::isnan(x)) {
return 0.0f;
} else if (x >= 1e38) {
return 1e38f;
} else if (x <= -1e38) {
return -1e38f;
} else {
return x;
}
}
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return (0);
}
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = OMP_NUM_THREADS();
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
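// Sketch of the strategy above: the range is cut into chunks of inner_size,
// each chunk is std::sort'ed by its own thread, then sorted runs are merged
// pairwise (s = inner_size, 2*inner_size, ...) until one run spans the whole
// range.  E.g. with len = 4096 and inner_size = 1024, pass one merges
// [0,1024)+[1024,2048) and [2048,3072)+[3072,4096); pass two merges the two
// resulting runs of 2048.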
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
std::ostringstream os;
os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
Log::Fatal(os.str().c_str(), callername, i);
};
for (int i = 1; i < ny; i += 2) {
if (y[i - 1] < y[i]) {
if (y[i - 1] < ymin) {
fatal_msg(i - 1);
} else if (y[i] > ymax) {
fatal_msg(i);
}
} else {
if (y[i - 1] > ymax) {
fatal_msg(i - 1);
} else if (y[i] < ymin) {
fatal_msg(i);
}
}
}
if (ny & 1) { // odd
if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
fatal_msg(ny - 1);
}
}
}
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
T1 minw;
T1 maxw;
T1 sumw;
int i;
if (nw & 1) { // odd
minw = w[0];
maxw = w[0];
sumw = w[0];
i = 2;
} else { // even
if (w[0] < w[1]) {
minw = w[0];
maxw = w[1];
} else {
minw = w[1];
maxw = w[0];
}
sumw = w[0] + w[1];
i = 3;
}
for (; i < nw; i += 2) {
if (w[i - 1] < w[i]) {
minw = std::min(minw, w[i - 1]);
maxw = std::max(maxw, w[i]);
} else {
minw = std::min(minw, w[i]);
maxw = std::max(maxw, w[i - 1]);
}
sumw += w[i - 1] + w[i];
}
if (mi != nullptr) {
*mi = minw;
}
if (ma != nullptr) {
*ma = maxw;
}
if (su != nullptr) {
*su = static_cast<T2>(sumw);
}
}
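// The pairwise scan above costs about 3 comparisons per 2 elements instead
// of 4: each pair is ordered once, then only the smaller element is compared
// with minw and the larger with maxw.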
inline static std::vector<uint32_t> EmptyBitset(int n) {
int size = n / 32;
if (n % 32 != 0) ++size;
return std::vector<uint32_t>(size);
}
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
auto& ref_v = *vec;
int i1 = val / 32;
int i2 = val % 32;
if (static_cast<int>(vec->size()) < i1 + 1) {
vec->resize(i1 + 1, 0);
}
ref_v[i1] |= (1 << i2);
}
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
std::vector<uint32_t> ret;
for (int i = 0; i < n; ++i) {
int i1 = vals[i] / 32;
int i2 = vals[i] % 32;
if (static_cast<int>(ret.size()) < i1 + 1) {
ret.resize(i1 + 1, 0);
}
ret[i1] |= (1 << i2);
}
return ret;
}
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
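// Example: for v = EmptyBitset(64), InsertBitset(&v, 37) sets bit 5 of word 1
// (37 = 32*1 + 5), after which FindInBitset(v.data(), 2, 37) returns true.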
inline static bool CheckDoubleEqualOrdered(double a, double b) {
double upper = std::nextafter(a, INFINITY);
return b <= upper;
}
inline static double GetDoubleUpperBound(double a) {
return std::nextafter(a, INFINITY);
}
inline static size_t GetLine(const char* str) {
auto start = str;
while (*str != '\0' && *str != '\n' && *str != '\r') {
++str;
}
return str - start;
}
inline static const char* SkipNewLine(const char* str) {
if (*str == '\r') {
++str;
}
if (*str == '\n') {
++str;
}
return str;
}
template <typename T>
static int Sign(T x) {
return (x > T(0)) - (x < T(0));
}
template <typename T>
static T SafeLog(T x) {
if (x > 0) {
return std::log(x);
} else {
return -INFINITY;
}
}
inline bool CheckAllowedJSON(const std::string& s) {
unsigned char char_code;
for (auto c : s) {
char_code = static_cast<unsigned char>(c);
if (char_code == 34 // "
|| char_code == 44 // ,
|| char_code == 58 // :
|| char_code == 91 // [
|| char_code == 93 // ]
|| char_code == 123 // {
|| char_code == 125 // }
) {
return false;
}
}
return true;
}
inline int RoundInt(double x) {
return static_cast<int>(x + 0.5f);
}
template <typename T, std::size_t N = 32>
class AlignmentAllocator {
public:
typedef T value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
inline AlignmentAllocator() throw() {}
template <typename T2>
inline AlignmentAllocator(const AlignmentAllocator<T2, N>&) throw() {}
inline ~AlignmentAllocator() throw() {}
inline pointer address(reference r) {
return &r;
}
inline const_pointer address(const_reference r) const {
return &r;
}
}
inline pointer allocate(size_type n) {
return (pointer)_mm_malloc(n * sizeof(value_type), N);
}
inline void deallocate(pointer p, size_type) {
_mm_free(p);
}
inline void construct(pointer p, const value_type& wert) {
new (p) value_type(wert);
}
inline void destroy(pointer p) {
p->~value_type();
}
inline size_type max_size() const noexcept {
return size_type(-1) / sizeof(value_type);
}
template <typename T2>
struct rebind {
typedef AlignmentAllocator<T2, N> other;
};
bool operator!=(const AlignmentAllocator<T, N>& other) const {
return !(*this == other);
}
// Returns true if and only if storage allocated from *this
// can be deallocated from other, and vice versa.
// Always returns true for stateless allocators.
bool operator==(const AlignmentAllocator<T, N>&) const {
return true;
}
};
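// Illustrative usage (hypothetical): a vector whose buffer is aligned for
// SIMD loads, e.g. 32-byte alignment for AVX. The allocation goes through
// _mm_malloc in allocate() above.
//
//   std::vector<double, AlignmentAllocator<double, 32>> aligned_vec(1024);
//   // aligned_vec.data() is 32-byte aligned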
class Timer {
public:
Timer() {
#ifdef TIMETAG
int num_threads = OMP_NUM_THREADS();
start_time_.resize(num_threads);
stats_.resize(num_threads);
#endif // TIMETAG
}
~Timer() { Print(); }
#ifdef TIMETAG
void Start(const std::string& name) {
auto tid = omp_get_thread_num();
start_time_[tid][name] = std::chrono::steady_clock::now();
}
void Stop(const std::string& name) {
auto cur_time = std::chrono::steady_clock::now();
auto tid = omp_get_thread_num();
if (stats_[tid].find(name) == stats_[tid].end()) {
stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
}
stats_[tid][name] += cur_time - start_time_[tid][name];
}
#else
void Start(const std::string&) {}
void Stop(const std::string&) {}
#endif // TIMETAG
void Print() const {
#ifdef TIMETAG
std::unordered_map<std::string, std::chrono::duration<double, std::milli>>
stats(stats_[0].begin(), stats_[0].end());
for (size_t i = 1; i < stats_.size(); ++i) {
for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) {
if (stats.find(it->first) == stats.end()) {
stats[it->first] = it->second;
} else {
stats[it->first] += it->second;
}
}
}
std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
stats.begin(), stats.end());
for (auto it = ordered.begin(); it != ordered.end(); ++it) {
Log::Info("%s costs:\t %f", it->first.c_str(), it->second.count() * 1e-3);  // milliseconds -> seconds
}
#endif // TIMETAG
}
#ifdef TIMETAG
std::vector<
std::unordered_map<std::string, std::chrono::steady_clock::time_point>>
start_time_;
std::vector<std::unordered_map<std::string,
std::chrono::duration<double, std::milli>>>
stats_;
#endif // TIMETAG
};
// Note: this class is not thread-safe, don't use it inside omp blocks
class FunctionTimer {
public:
#ifdef TIMETAG
FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) {
timer.Start(name);
name_ = name;
}
~FunctionTimer() { timer_.Stop(name_); }
private:
std::string name_;
Timer& timer_;
#else
FunctionTimer(const std::string&, Timer&) {}
#endif // TIMETAG
};
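// Minimal usage sketch (the section names are hypothetical; timing is only
// active when compiled with -DTIMETAG). FunctionTimer is RAII-based, so its
// section stops automatically when the scope ends:
//
//   void TrainOneIter() {
//     FunctionTimer fun_timer("TrainOneIter", global_timer);
//     global_timer.Start("TrainOneIter.hist");
//     // ... build histograms ...
//     global_timer.Stop("TrainOneIter.hist");
//   }  // per-section totals are printed when global_timer is destroyed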
} // namespace Common
extern Common::Timer global_timer;
/*!
* Provides locale-independent alternatives to Common's methods.
* Essential to make models robust to locale settings.
*/
namespace CommonC {
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
return LightGBM::Common::Join(strs, delimiter, true);
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
return LightGBM::Common::Join(strs, start, end, delimiter, true);
}
inline static const char* Atof(const char* p, double* out) {
return LightGBM::Common::Atof(p, out);
}
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return LightGBM::Common::Atoi(p, out);
}
};
/*!
* \warning Beware that ``Common::Atof``, used in ``__StringToTHelperFast``,
* has **less** floating-point precision than ``__StringToTHelper``.
* Both versions are kept to maintain the "legacy" LightGBM behaviour bit-for-bit in terms of precision.
* Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
*/
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
LightGBM::Common::Atoi(str.c_str(), &ret);
return ret;
}
};
/*!
* \warning Beware that ``Common::Atof``, used in ``__StringToTHelperFast``,
* has **less** floating-point precision than ``__StringToTHelper``.
* Both versions are kept to maintain the "legacy" LightGBM behaviour bit-for-bit in terms of precision.
* Check ``StringToArrayFast`` and ``StringToArray`` for more details on this.
* \note It is possible that ``fast_double_parser::parse_number`` is faster than ``Common::Atof``.
*/
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
double tmp;
const char* end = Common::AtofPrecise(str.c_str(), &tmp);
if (end == str.c_str()) {
Log::Fatal("Failed to parse double: %s", str.c_str());
}
return static_cast<T>(tmp);
}
};
/*!
* \warning Beware that due to the internal use of ``Common::Atof`` in ``__StringToTHelperFast``,
* this method has less precision for floating-point numbers than ``StringToArray``,
* which calls ``__StringToTHelper``.
* As such, ``StringToArrayFast`` and ``StringToArray`` are not equivalent!
* Both versions are kept to maintain the "legacy" LightGBM behaviour bit-for-bit in terms of precision.
*/
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
/*!
* \warning Do not replace calls to this method by ``StringToArrayFast``.
* This method is more precise for floating point numbers.
* Check ``StringToArrayFast`` for more details.
*/
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), ' ');
CHECK_EQ(strs.size(), static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
/*!
* \warning Do not replace calls to this method by ``StringToArrayFast``.
* This method is more precise for floating point numbers.
* Check ``StringToArrayFast`` for more details.
*/
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
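// Illustrative comparison of the two parsers (hypothetical input). Both
// return identical integers; for doubles the results may differ in the last
// bits, which is why serialized models must not switch between them:
//
//   std::string s = "0.1 0.2 0.30000000000000004";
//   auto precise = StringToArray<double>(s, 3);   // uses AtofPrecise
//   auto fast = StringToArrayFast<double>(s, 3);  // uses Common::Atof
//   // precise[2] is parsed exactly; fast[2] may differ in the last bits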
#if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
/*!
* Safely formats a value onto a buffer according to a format string and null-terminates it.
*
* \note It checks that the full value was written, and forcefully aborts otherwise.
* This safety check serves to prevent incorrect internal API usage:
* correct usage never triggers it, because
* - the received buffer must always be large enough for the given format string and value.
*/
template <typename T>
inline static void format_to_buf(char* buffer, const size_t buf_len, const char* format, const T value) {
auto result = fmt::format_to_n(buffer, buf_len, format, value);
if (result.size >= buf_len) {
Log::Fatal("Numerical conversion failed. Buffer is too small.");
}
buffer[result.size] = '\0';
}
template<typename T, bool is_float, bool high_precision>
struct __TToStringHelper {
void operator()(T value, char* buffer, size_t buf_len) const {
format_to_buf(buffer, buf_len, "{}", value);
}
};
template<typename T>
struct __TToStringHelper<T, true, false> {
void operator()(T value, char* buffer, size_t buf_len) const {
format_to_buf(buffer, buf_len, "{:g}", value);
}
};
template<typename T>
struct __TToStringHelper<T, true, true> {
void operator()(T value, char* buffer, size_t buf_len) const {
format_to_buf(buffer, buf_len, "{:.17g}", value);
}
};
/*!
* Converts an array to a string with values separated by the space character.
* This method replaces Common's ``ArrayToString`` and ``ArrayToStringFast`` functionality
* and is locale-independent.
*
* \note If ``high_precision_output`` is set to true,
* floating point values are output with more digits of precision.
*/
template<bool high_precision_output = false, typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
__TToStringHelper<T, std::is_floating_point<T>::value, high_precision_output> helper;
const size_t buf_len = high_precision_output ? 32 : 16;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
Common::C_stringstream(str_buf);
helper(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
helper(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
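// Illustrative round trip (hypothetical values): with high_precision_output,
// doubles are written with up to 17 significant digits, so they parse back
// bit-for-bit via StringToArray.
//
//   std::vector<double> v = {1.0, 0.1, 3.5};
//   std::string s = ArrayToString<true>(v, v.size());
//   // s == "1 0.10000000000000001 3.5"
//   auto w = StringToArray<double>(s, 3);  // w == v exactly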
#endif // (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))))
} // namespace CommonC
} // namespace LightGBM
#endif // LIGHTGBM_UTILS_COMMON_H_
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"
/************************************************
* options
************************************************/
acados_size_t ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
acados_size_t size = 0;
size += sizeof(ocp_nlp_sqp_opts);
size += ocp_nlp_opts_calculate_size(config, dims);
return size;
}
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
char *c_ptr = (char *) raw_memory;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_opts);
opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_opts_calculate_size(config, dims);
assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// int ii;
// this must be called first !!!
ocp_nlp_opts_initialize_default(config, dims, nlp_opts);
// SQP opts
opts->max_iter = 20;
opts->tol_stat = 1e-8;
opts->tol_eq = 1e-8;
opts->tol_ineq = 1e-8;
opts->tol_comp = 1e-8;
opts->ext_qp_res = 0;
opts->qp_warm_start = 0;
opts->warm_start_first_qp = false;
opts->rti_phase = 0;
opts->print_level = 0;
opts->initialize_t_slacks = 0;
// overwrite default submodules opts
// qp tolerance
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);
return;
}
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_opts_update(config, dims, nlp_opts);
return;
}
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
ocp_nlp_opts_set(config, nlp_opts, field, value);
if (!strcmp(field, "qp_warm_start"))
{
int* i_ptr = (int *) value;
opts->qp_warm_start = *i_ptr;
}
}
else // nlp opts
{
if (!strcmp(field, "max_iter"))
{
int* max_iter = (int *) value;
opts->max_iter = *max_iter;
}
else if (!strcmp(field, "tol_stat"))
{
double* tol_stat = (double *) value;
opts->tol_stat = *tol_stat;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
}
else if (!strcmp(field, "tol_eq"))
{
double* tol_eq = (double *) value;
opts->tol_eq = *tol_eq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
}
else if (!strcmp(field, "tol_ineq"))
{
double* tol_ineq = (double *) value;
opts->tol_ineq = *tol_ineq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
}
else if (!strcmp(field, "tol_comp"))
{
double* tol_comp = (double *) value;
opts->tol_comp = *tol_comp;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
}
else if (!strcmp(field, "ext_qp_res"))
{
int* ext_qp_res = (int *) value;
opts->ext_qp_res = *ext_qp_res;
}
else if (!strcmp(field, "warm_start_first_qp"))
{
bool* warm_start_first_qp = (bool *) value;
opts->warm_start_first_qp = *warm_start_first_qp;
}
else if (!strcmp(field, "rti_phase"))
{
int* rti_phase = (int *) value;
if (*rti_phase != 0) {
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field. ");
printf("possible values are: 0\n");
exit(1);
} else opts->rti_phase = *rti_phase;
}
else if (!strcmp(field, "print_level"))
{
int* print_level = (int *) value;
if (*print_level < 0)
{
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
exit(1);
}
opts->print_level = *print_level;
}
else if (!strcmp(field, "initialize_t_slacks"))
{
int* initialize_t_slacks = (int *) value;
if (*initialize_t_slacks != 0 && *initialize_t_slacks != 1)
{
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for initialize_t_slacks field, need int 0 or 1, got %d.", *initialize_t_slacks);
exit(1);
}
opts->initialize_t_slacks = *initialize_t_slacks;
}
else
{
ocp_nlp_opts_set(config, nlp_opts, field, value);
}
}
return;
}
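/* Illustrative usage (a sketch; assumes config and opts were created through
 * the usual acados interface). Fields without a module prefix are SQP-level
 * options, while a "qp_" prefix routes the option to the QP solver module:
 *
 *   int max_iter = 50;
 *   ocp_nlp_sqp_opts_set(config, opts, "max_iter", &max_iter);
 *   int warm_start = 1;
 *   ocp_nlp_sqp_opts_set(config, opts, "qp_warm_start", &warm_start);
 */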
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);
return;
}
/************************************************
* memory
************************************************/
acados_size_t ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
acados_size_t size = 0;
size += sizeof(ocp_nlp_sqp_memory);
// nlp mem
size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat
int stat_m = opts->max_iter+1;
int stat_n = 6;
if (opts->ext_qp_res)
stat_n += 4;
size += stat_n*stat_m*sizeof(double);
size += 3*8; // align
make_int_multiple_of(8, &size);
return size;
}
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// ocp_nlp_dynamics_config **dynamics = config->dynamics;
// ocp_nlp_cost_config **cost = config->cost;
// ocp_nlp_constraints_config **constraints = config->constraints;
char *c_ptr = (char *) raw_memory;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_memory);
align_char_to(8, &c_ptr);
// nlp mem
mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat
mem->stat = (double *) c_ptr;
mem->stat_m = opts->max_iter+1;
mem->stat_n = 6;
if (opts->ext_qp_res)
mem->stat_n += 4;
c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
mem->status = ACADOS_READY;
align_char_to(8, &c_ptr);
assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
return mem;
}
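/* The calculate_size/assign pair above follows the acados memory convention:
 * the caller allocates one raw chunk and the module lays out all of its
 * members inside it. A minimal sketch (error handling omitted; the interface
 * layer normally does this for you):
 *
 *   acados_size_t size = ocp_nlp_sqp_memory_calculate_size(config, dims, opts);
 *   void *raw = malloc(size);
 *   ocp_nlp_sqp_memory *mem = ocp_nlp_sqp_memory_assign(config, dims, opts, raw);
 *   // ... use mem; a single free(raw) later releases everything at once
 */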
/************************************************
* workspace
************************************************/
acados_size_t ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
acados_size_t size = 0;
// sqp
size += sizeof(ocp_nlp_sqp_workspace);
// nlp
size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
return size;
}
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
// sqp
char *c_ptr = (char *) work;
c_ptr += sizeof(ocp_nlp_sqp_workspace);
// nlp
work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return;
}
/************************************************
* functions
************************************************/
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
acados_timer timer0, timer1;
acados_tic(&timer0);
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
// zero timers
double total_time = 0.0;
double tmp_time;
mem->time_qp_sol = 0.0;
mem->time_qp_solver_call = 0.0;
mem->time_qp_xcond = 0.0;
mem->time_lin = 0.0;
mem->time_reg = 0.0;
mem->time_tot = 0.0;
mem->time_glob = 0.0;
int N = dims->N;
int ii;
int qp_iter = 0;
int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
// backup number of threads
int num_threads_bkp = omp_get_num_threads();
// set number of threads
omp_set_num_threads(opts->nlp_opts->num_threads);
#pragma omp parallel
{ // beginning of parallel region
#endif
// alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
}
// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
}
// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
}
// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
// NOTE(oj): this will lead to an error for irk_gnsf, since T must be set in precompute;
// -> remove here and make sure precompute is called everywhere.
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
}
#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif
//
if (opts->initialize_t_slacks > 0)
ocp_nlp_initialize_t_slacks(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// initialize QP
ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// main sqp loop
int sqp_iter = 0;
nlp_mem->sqp_iter = &sqp_iter;
for (; sqp_iter < opts->max_iter; sqp_iter++)
{
// linearize NLP and update QP matrices
acados_tic(&timer1);
ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_lin += acados_toc(&timer1);
// update QP rhs for SQP (step in primal variables, absolute values for dual variables)
ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// compute nlp residuals
ocp_nlp_res_compute(dims, nlp_in, nlp_out, nlp_mem->nlp_res, nlp_mem);
nlp_out->inf_norm_res = nlp_mem->nlp_res->inf_norm_res_stat;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_eq > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_eq :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_ineq > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_ineq :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_comp > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_comp :
nlp_out->inf_norm_res;
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
// save statistics
if (sqp_iter < mem->stat_m)
{
mem->stat[mem->stat_n*sqp_iter+0] = nlp_mem->nlp_res->inf_norm_res_stat;
mem->stat[mem->stat_n*sqp_iter+1] = nlp_mem->nlp_res->inf_norm_res_eq;
mem->stat[mem->stat_n*sqp_iter+2] = nlp_mem->nlp_res->inf_norm_res_ineq;
mem->stat[mem->stat_n*sqp_iter+3] = nlp_mem->nlp_res->inf_norm_res_comp;
}
// exit conditions on residuals
if ((nlp_mem->nlp_res->inf_norm_res_stat < opts->tol_stat) &
(nlp_mem->nlp_res->inf_norm_res_eq < opts->tol_eq) &
(nlp_mem->nlp_res->inf_norm_res_ineq < opts->tol_ineq) &
(nlp_mem->nlp_res->inf_norm_res_comp < opts->tol_comp))
{
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
nlp_out->total_time = total_time;
mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_SUCCESS;
if (opts->print_level > 0)
{
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq,
nlp_mem->nlp_res->inf_norm_res_comp );
printf("\n\n");
}
return mem->status;
}
// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// (typically) no warm start at first iteration
if (sqp_iter == 0 && !opts->warm_start_first_qp)
{
int tmp_int = 0;
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &tmp_int);
}
// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
mem->time_qp_solver_call += tmp_time;
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
mem->time_qp_xcond += tmp_time;
// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// restore default warm start
if (sqp_iter==0)
{
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &opts->qp_warm_start);
}
// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
// printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
qp_iter = qp_info_->num_iter;
// save statistics of last qp solver call
if (sqp_iter+1 < mem->stat_m)
{
mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
}
// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
if (sqp_iter+1 < mem->stat_m)
ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
}
if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
{
// print_ocp_qp_in(nlp_mem->qp_in);
if (opts->print_level > 0)
{
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq,
nlp_mem->nlp_res->inf_norm_res_comp );
printf("\n\n");
}
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
#ifndef ACADOS_SILENT
printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#endif
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
if (opts->print_level > 1)
{
printf("\n Failed to solve the following QP:\n");
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
}
mem->status = ACADOS_QP_FAILURE;
return mem->status;
}
// globalization
acados_tic(&timer1);
double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_glob += acados_toc(&timer1);
// update variables
ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);
// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);
// ??? @rien
// for (int_t i = 0; i < N; i++)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme == NULL)
// continue;
// opts->sens_adj = (opts->scheme->type != exact);
// if (nlp_in->freezeSens) {
// // freeze inexact sensitivities after first SQP iteration !!
// opts->scheme->freeze = true;
// }
// }
if (opts->print_level > 0)
{
if (sqp_iter%10 == 0)
{
printf("# it\tstat\t\teq\t\tineq\t\tcomp\n");
}
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp );
}
}
// stop timer
total_time += acados_toc(&timer0);
if (opts->print_level > 0)
printf("\n\n");
// ocp_nlp_out_print(nlp_out);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
// maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_MAXITER;
#ifndef ACADOS_SILENT
printf("\n ocp_nlp_sqp: maximum iterations reached\n");
#endif
return mem->status;
}
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
// ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
int N = dims->N;
int status = ACADOS_SUCCESS;
int ii;
// TODO(all) add flag to enable/disable checks
for (ii = 0; ii <= N; ii++)
{
int module_val;
config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
if (dims->ns[ii] != module_val)
{
printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.",
ii, dims->ns[ii], module_val);
exit(1);
}
}
// precompute
for (ii = 0; ii < N; ii++)
{
// set T
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
// dynamics precompute
status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
if (status != ACADOS_SUCCESS)
return status;
}
return status;
}
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
char *field, int stage, int index, void *sens_nlp_out_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
d_ocp_qp_set_rhs_zero(work->tmp_qp_in);
double one = 1.0;
if ((!strcmp("ex", field)) & (stage==0))
{
d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
// d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
// d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
// exit(1);
/* copy tmp_qp_out into sens_nlp_out */
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// int *nz = dims->nz;
for (i = 0; i <= N; i++)
{
blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
if (i < N)
blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
}
}
else
{
printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
exit(1);
}
return;
}
// TODO rename memory_get ???
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
ocp_nlp_sqp_memory *mem = mem_;
if (!strcmp("sqp_iter", field))
{
int *value = return_value_;
*value = mem->sqp_iter;
}
else if (!strcmp("status", field))
{
int *value = return_value_;
*value = mem->status;
}
else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
{
double *value = return_value_;
*value = mem->time_tot;
}
else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
{
double *value = return_value_;
*value = mem->time_qp_sol;
}
else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
{
double *value = return_value_;
*value = mem->time_qp_solver_call;
}
else if (!strcmp("time_qp_xcond", field))
{
double *value = return_value_;
*value = mem->time_qp_xcond;
}
else if (!strcmp("time_lin", field))
{
double *value = return_value_;
*value = mem->time_lin;
}
else if (!strcmp("time_reg", field))
{
double *value = return_value_;
*value = mem->time_reg;
}
else if (!strcmp("time_glob", field))
{
double *value = return_value_;
*value = mem->time_glob;
}
else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
{
double tmp = 0.0;
double *ptr = return_value_;
*ptr = 0.0;  // initialize before accumulating over stages
int N = dims->N;
int ii;
for (ii=0; ii<N; ii++)
{
config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
*ptr += tmp;
}
}
else if (!strcmp("stat", field))
{
double **value = return_value_;
*value = mem->stat;
}
else if (!strcmp("statistics", field))
{
int n_row = mem->stat_m<mem->sqp_iter+1 ? mem->stat_m : mem->sqp_iter+1;
double *value = return_value_;
for (int ii=0; ii<n_row; ii++)
{
value[ii+0] = ii;
for (int jj=0; jj<mem->stat_n; jj++)
value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
}
}
else if (!strcmp("stat_m", field))
{
int *value = return_value_;
*value = mem->stat_m;
}
else if (!strcmp("stat_n", field))
{
int *value = return_value_;
*value = mem->stat_n;
}
else if (!strcmp("nlp_mem", field))
{
void **value = return_value_;
*value = mem->nlp_mem;
}
else if (!strcmp("qp_xcond_dims", field))
{
void **value = return_value_;
*value = dims->qp_solver->xcond_dims;
}
else if (!strcmp("nlp_res", field))
{
ocp_nlp_res **value = return_value_;
*value = mem->nlp_mem->nlp_res;
}
else if (!strcmp("qp_xcond_in", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
}
else if (!strcmp("qp_xcond_out", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
}
else if (!strcmp("qp_in", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_in;
}
else if (!strcmp("qp_out", field))
{
void **value = return_value_;
*value = mem->nlp_mem->qp_out;
}
else if (!strcmp("qp_iter", field))
{
config->qp_solver->memory_get(config->qp_solver,
mem->nlp_mem->qp_solver_mem, "iter", return_value_);
}
else if (!strcmp("res_stat", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
}
else if (!strcmp("res_eq", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
}
else if (!strcmp("res_ineq", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
}
else if (!strcmp("res_comp", field))
{
double *value = return_value_;
*value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
}
else if (!strcmp("cost_value", field))
{
double *value = return_value_;
*value = mem->nlp_mem->cost_value;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_opts_get(void *config_, void *dims_, void *opts_,
const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
if (!strcmp("nlp_opts", field))
{
void **value = return_value_;
*value = opts->nlp_opts;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_opts_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_work_get(void *config_, void *dims_, void *work_,
const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_workspace *work = work_;
if (!strcmp("nlp_work", field))
{
void **value = return_value_;
*value = work->nlp_work;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_work_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
ocp_nlp_config *config = (ocp_nlp_config *) config_;
config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
config->opts_assign = &ocp_nlp_sqp_opts_assign;
config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
config->opts_update = &ocp_nlp_sqp_opts_update;
config->opts_set = &ocp_nlp_sqp_opts_set;
config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage;
config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
config->memory_assign = &ocp_nlp_sqp_memory_assign;
config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
config->evaluate = &ocp_nlp_sqp;
config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens;
config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
config->precompute = &ocp_nlp_sqp_precompute;
config->get = &ocp_nlp_sqp_get;
config->opts_get = &ocp_nlp_sqp_opts_get;
config->work_get = &ocp_nlp_sqp_work_get;
return;
}
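/* Sketch of how the function-pointer table above is consumed (hypothetical
 * caller): generic solver code never calls ocp_nlp_sqp_* directly, it
 * dispatches through the config:
 *
 *   ocp_nlp_sqp_config_initialize_default(config);
 *   int status = config->evaluate(config, dims, nlp_in, nlp_out, opts, mem, work);
 */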
|
sstruct_matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_SStructPMatrix class.
*
*****************************************************************************/
#include "_hypre_sstruct_mv.h"
/*==========================================================================
* SStructPMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixRef( hypre_SStructPMatrix *matrix,
hypre_SStructPMatrix **matrix_ref )
{
hypre_SStructPMatrixRefCount(matrix) ++;
*matrix_ref = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixCreate( MPI_Comm comm,
hypre_SStructPGrid *pgrid,
hypre_SStructStencil **stencils,
hypre_SStructPMatrix **pmatrix_ptr )
{
hypre_SStructPMatrix *pmatrix;
HYPRE_Int nvars;
HYPRE_Int **smaps;
hypre_StructStencil ***sstencils;
hypre_StructMatrix ***smatrices;
HYPRE_Int **symmetric;
hypre_StructStencil *sstencil;
HYPRE_Int *vars;
hypre_Index *sstencil_shape;
HYPRE_Int sstencil_size;
HYPRE_Int new_dim;
HYPRE_Int *new_sizes;
hypre_Index **new_shapes;
HYPRE_Int size;
hypre_StructGrid *sgrid;
HYPRE_Int vi, vj;
HYPRE_Int i, j, k;
pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1, HYPRE_MEMORY_HOST);
hypre_SStructPMatrixComm(pmatrix) = comm;
hypre_SStructPMatrixPGrid(pmatrix) = pgrid;
hypre_SStructPMatrixStencils(pmatrix) = stencils;
nvars = hypre_SStructPGridNVars(pgrid);
hypre_SStructPMatrixNVars(pmatrix) = nvars;
/* create sstencils */
smaps = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST);
sstencils = hypre_TAlloc(hypre_StructStencil **, nvars, HYPRE_MEMORY_HOST);
new_sizes = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST);
new_shapes = hypre_TAlloc(hypre_Index *, nvars, HYPRE_MEMORY_HOST);
size = 0;
for (vi = 0; vi < nvars; vi++)
{
sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars, HYPRE_MEMORY_HOST);
for (vj = 0; vj < nvars; vj++)
{
sstencils[vi][vj] = NULL;
new_sizes[vj] = 0;
}
sstencil = hypre_SStructStencilSStencil(stencils[vi]);
vars = hypre_SStructStencilVars(stencils[vi]);
sstencil_shape = hypre_StructStencilShape(sstencil);
sstencil_size = hypre_StructStencilSize(sstencil);
smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size, HYPRE_MEMORY_HOST);
for (i = 0; i < sstencil_size; i++)
{
j = vars[i];
new_sizes[j]++;
}
for (vj = 0; vj < nvars; vj++)
{
if (new_sizes[vj])
{
new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj], HYPRE_MEMORY_HOST);
new_sizes[vj] = 0;
}
}
for (i = 0; i < sstencil_size; i++)
{
j = vars[i];
k = new_sizes[j];
hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]);
smaps[vi][i] = k;
new_sizes[j]++;
}
new_dim = hypre_StructStencilNDim(sstencil);
for (vj = 0; vj < nvars; vj++)
{
if (new_sizes[vj])
{
sstencils[vi][vj] =
hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]);
}
size = hypre_max(size, new_sizes[vj]);
}
}
hypre_SStructPMatrixSMaps(pmatrix) = smaps;
hypre_SStructPMatrixSStencils(pmatrix) = sstencils;
hypre_TFree(new_sizes, HYPRE_MEMORY_HOST);
hypre_TFree(new_shapes, HYPRE_MEMORY_HOST);
/* create smatrices */
smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars, HYPRE_MEMORY_HOST);
for (vi = 0; vi < nvars; vi++)
{
smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars, HYPRE_MEMORY_HOST);
for (vj = 0; vj < nvars; vj++)
{
smatrices[vi][vj] = NULL;
if (sstencils[vi][vj] != NULL)
{
sgrid = hypre_SStructPGridSGrid(pgrid, vi);
smatrices[vi][vj] =
hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]);
}
}
}
hypre_SStructPMatrixSMatrices(pmatrix) = smatrices;
/* create symmetric */
symmetric = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST);
for (vi = 0; vi < nvars; vi++)
{
symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST);
for (vj = 0; vj < nvars; vj++)
{
symmetric[vi][vj] = 0;
}
}
hypre_SStructPMatrixSymmetric(pmatrix) = symmetric;
hypre_SStructPMatrixSEntriesSize(pmatrix) = size;
hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size, HYPRE_MEMORY_HOST);
hypre_SStructPMatrixRefCount(pmatrix) = 1;
*pmatrix_ptr = pmatrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix )
{
hypre_SStructStencil **stencils;
HYPRE_Int nvars;
HYPRE_Int **smaps;
hypre_StructStencil ***sstencils;
hypre_StructMatrix ***smatrices;
HYPRE_Int **symmetric;
HYPRE_Int vi, vj;
if (pmatrix)
{
hypre_SStructPMatrixRefCount(pmatrix) --;
if (hypre_SStructPMatrixRefCount(pmatrix) == 0)
{
stencils = hypre_SStructPMatrixStencils(pmatrix);
nvars = hypre_SStructPMatrixNVars(pmatrix);
smaps = hypre_SStructPMatrixSMaps(pmatrix);
sstencils = hypre_SStructPMatrixSStencils(pmatrix);
smatrices = hypre_SStructPMatrixSMatrices(pmatrix);
symmetric = hypre_SStructPMatrixSymmetric(pmatrix);
for (vi = 0; vi < nvars; vi++)
{
HYPRE_SStructStencilDestroy(stencils[vi]);
hypre_TFree(smaps[vi], HYPRE_MEMORY_HOST);
for (vj = 0; vj < nvars; vj++)
{
hypre_StructStencilDestroy(sstencils[vi][vj]);
hypre_StructMatrixDestroy(smatrices[vi][vj]);
}
hypre_TFree(sstencils[vi], HYPRE_MEMORY_HOST);
hypre_TFree(smatrices[vi], HYPRE_MEMORY_HOST);
hypre_TFree(symmetric[vi], HYPRE_MEMORY_HOST);
}
hypre_TFree(stencils, HYPRE_MEMORY_HOST);
hypre_TFree(smaps, HYPRE_MEMORY_HOST);
hypre_TFree(sstencils, HYPRE_MEMORY_HOST);
hypre_TFree(smatrices, HYPRE_MEMORY_HOST);
hypre_TFree(symmetric, HYPRE_MEMORY_HOST);
hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix), HYPRE_MEMORY_HOST);
hypre_TFree(pmatrix, HYPRE_MEMORY_HOST);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix )
{
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
HYPRE_Int **symmetric = hypre_SStructPMatrixSymmetric(pmatrix);
hypre_StructMatrix *smatrix;
HYPRE_Int vi, vj;
/* HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; */
/* HYPRE_Int vi, vj, d, ndim; */
#if 0
ndim = hypre_SStructPMatrixNDim(pmatrix);
/* RDF: Why are the ghosts being reset to one? Maybe it needs to be at least
* one to set shared coefficients correctly, but not exactly one? */
for (d = 0; d < ndim; d++)
{
num_ghost[2*d] = num_ghost[2*d+1] = 1;
}
#endif
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]);
/* hypre_StructMatrixSetNumGhost(smatrix, num_ghost); */
hypre_StructMatrixInitialize(smatrix);
/* needed to get AddTo accumulation correct between processors */
hypre_StructMatrixClearGhostValues(smatrix);
}
}
}
hypre_SStructPMatrixAccumulated(pmatrix) = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix,
hypre_Index index,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_StructMatrix *smatrix;
hypre_BoxArray *grid_boxes;
hypre_Box *box, *grow_box;
HYPRE_Int *sentries;
HYPRE_Int i;
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);
sentries = hypre_SStructPMatrixSEntries(pmatrix);
for (i = 0; i < nentries; i++)
{
sentries[i] = smap[entries[i]];
}
/* set values inside the grid */
hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values,
action, -1, 0);
/* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
if (action != 0)
{
/* AddTo/Get */
hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
hypre_Index varoffset;
HYPRE_Int done = 0;
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
hypre_ForBoxI(i, grid_boxes)
{
box = hypre_BoxArrayBox(grid_boxes, i);
if (hypre_IndexInBox(index, box))
{
done = 1;
break;
}
}
if (!done)
{
grow_box = hypre_BoxCreate(hypre_BoxArrayNDim(grid_boxes));
hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
hypre_SStructPGridNDim(pgrid), varoffset);
hypre_ForBoxI(i, grid_boxes)
{
box = hypre_BoxArrayBox(grid_boxes, i);
hypre_CopyBox(box, grow_box);
hypre_BoxGrowByIndex(grow_box, varoffset);
if (hypre_IndexInBox(index, grow_box))
{
hypre_StructMatrixSetValues(smatrix, index, nentries, sentries,
values, action, i, 1);
break;
}
}
hypre_BoxDestroy(grow_box);
}
}
else
{
/* Set */
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
hypre_ForBoxI(i, grid_boxes)
{
box = hypre_BoxArrayBox(grid_boxes, i);
if (!hypre_IndexInBox(index, box))
{
hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1);
}
}
}
return hypre_error_flag;
}
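/* Illustrative calls (hypothetical pmatrix, index, var and entries): the
 * action argument selects among the modes documented above.
 *
 *   HYPRE_Complex vals[1] = {4.0};
 *   HYPRE_Int     entries[1] = {0};
 *   hypre_SStructPMatrixSetValues(pmatrix, index, var, 1, entries, vals,  0);  // set
 *   hypre_SStructPMatrixSetValues(pmatrix, index, var, 1, entries, vals,  1);  // add-to
 *   hypre_SStructPMatrixSetValues(pmatrix, index, var, 1, entries, vals, -1);  // get
 */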
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix,
hypre_Box *set_box,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
hypre_Box *value_box,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructPMatrixNDim(pmatrix);
hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_StructMatrix *smatrix;
hypre_BoxArray *grid_boxes;
HYPRE_Int *sentries;
HYPRE_Int i, j;
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);
sentries = hypre_SStructPMatrixSEntries(pmatrix);
for (i = 0; i < nentries; i++)
{
sentries[i] = smap[entries[i]];
}
/* set values inside the grid */
hypre_StructMatrixSetBoxValues(smatrix, set_box, value_box, nentries, sentries,
values, action, -1, 0);
/* TODO: Why is a device sync needed here? */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_CUDA_CALL(cudaDeviceSynchronize());
#endif
/* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
if (action != 0)
{
/* AddTo/Get */
hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
hypre_Index varoffset;
hypre_BoxArray *left_boxes, *done_boxes, *temp_boxes;
hypre_Box *left_box, *done_box, *int_box;
hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
hypre_SStructPGridNDim(pgrid), varoffset);
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
left_boxes = hypre_BoxArrayCreate(1, ndim);
done_boxes = hypre_BoxArrayCreate(2, ndim);
temp_boxes = hypre_BoxArrayCreate(0, ndim);
/* done_box always points to the first box in done_boxes */
done_box = hypre_BoxArrayBox(done_boxes, 0);
/* int_box always points to the second box in done_boxes */
int_box = hypre_BoxArrayBox(done_boxes, 1);
hypre_CopyBox(set_box, hypre_BoxArrayBox(left_boxes, 0));
hypre_BoxArraySetSize(left_boxes, 1);
hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes);
hypre_BoxArraySetSize(done_boxes, 0);
hypre_ForBoxI(i, grid_boxes)
{
hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes);
hypre_BoxArraySetSize(done_boxes, 1);
hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box);
hypre_BoxGrowByIndex(done_box, varoffset);
hypre_ForBoxI(j, left_boxes)
{
left_box = hypre_BoxArrayBox(left_boxes, j);
hypre_IntersectBoxes(left_box, done_box, int_box);
hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box,
nentries, sentries,
values, action, i, 1);
}
}
hypre_BoxArrayDestroy(left_boxes);
hypre_BoxArrayDestroy(done_boxes);
hypre_BoxArrayDestroy(temp_boxes);
}
else
{
/* Set */
hypre_BoxArray *diff_boxes;
hypre_Box *grid_box, *diff_box;
grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
diff_boxes = hypre_BoxArrayCreate(0, ndim);
hypre_ForBoxI(i, grid_boxes)
{
grid_box = hypre_BoxArrayBox(grid_boxes, i);
hypre_BoxArraySetSize(diff_boxes, 0);
hypre_SubtractBoxes(set_box, grid_box, diff_boxes);
hypre_ForBoxI(j, diff_boxes)
{
diff_box = hypre_BoxArrayBox(diff_boxes, j);
hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries,
i, 1);
}
}
hypre_BoxArrayDestroy(diff_boxes);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix )
{
hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
HYPRE_Int ndim = hypre_SStructPGridNDim(pgrid);
HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid);
hypre_StructMatrix *smatrix;
hypre_Index varoffset;
HYPRE_Int num_ghost[2*HYPRE_MAXDIM];
hypre_StructGrid *sgrid;
HYPRE_Int vi, vj, d;
hypre_CommInfo *comm_info;
hypre_CommPkg *comm_pkg;
hypre_CommHandle *comm_handle;
/* if values already accumulated, just return */
if (hypre_SStructPMatrixAccumulated(pmatrix))
{
return hypre_error_flag;
}
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
sgrid = hypre_StructMatrixGrid(smatrix);
/* assumes vi and vj vartypes are the same */
hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset);
for (d = 0; d < ndim; d++)
{
num_ghost[2*d] = num_ghost[2*d+1] = hypre_IndexD(varoffset, d);
}
/* accumulate values from AddTo */
hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info);
hypre_CommPkgCreate(comm_info,
hypre_StructMatrixDataSpace(smatrix),
hypre_StructMatrixDataSpace(smatrix),
hypre_StructMatrixNumValues(smatrix), NULL, 1,
hypre_StructMatrixComm(smatrix),
&comm_pkg);
hypre_InitializeCommunication(comm_pkg,
hypre_StructMatrixData(smatrix),
hypre_StructMatrixData(smatrix),
1, 0, &comm_handle);
hypre_FinalizeCommunication(comm_handle);
hypre_CommInfoDestroy(comm_info);
hypre_CommPkgDestroy(comm_pkg);
}
}
}
hypre_SStructPMatrixAccumulated(pmatrix) = 1;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix )
{
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
hypre_StructMatrix *smatrix;
HYPRE_Int vi, vj;
hypre_SStructPMatrixAccumulate(pmatrix);
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
hypre_StructMatrixClearGhostValues(smatrix);
hypre_StructMatrixAssemble(smatrix);
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix,
HYPRE_Int var,
HYPRE_Int to_var,
HYPRE_Int symmetric )
{
HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix);
HYPRE_Int vstart = var;
HYPRE_Int vsize = 1;
HYPRE_Int tstart = to_var;
HYPRE_Int tsize = 1;
HYPRE_Int v, t;
if (var == -1)
{
vstart = 0;
vsize = hypre_SStructPMatrixNVars(pmatrix);
}
if (to_var == -1)
{
tstart = 0;
tsize = hypre_SStructPMatrixNVars(pmatrix);
}
   /* loop over [vstart, vstart+vsize) x [tstart, tstart+tsize); comparing
    * against vsize/tsize alone would skip everything whenever var > 0 */
   for (v = vstart; v < (vstart + vsize); v++)
   {
      for (t = tstart; t < (tstart + tsize); t++)
{
pmsymmetric[v][t] = symmetric;
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixPrint( const char *filename,
hypre_SStructPMatrix *pmatrix,
HYPRE_Int all )
{
HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
hypre_StructMatrix *smatrix;
HYPRE_Int vi, vj;
char new_filename[255];
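   /* one file per (vi,vj) block, named e.g. "<filename>.00.01" */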
for (vi = 0; vi < nvars; vi++)
{
for (vj = 0; vj < nvars; vj++)
{
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
if (smatrix != NULL)
{
hypre_sprintf(new_filename, "%s.%02d.%02d", filename, vi, vj);
hypre_StructMatrixPrint(new_filename, smatrix, all);
}
}
}
return hypre_error_flag;
}
/*==========================================================================
* SStructUMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int nparts = hypre_SStructGraphNParts(graph);
hypre_SStructPGrid **pgrids = hypre_SStructGraphPGrids(graph);
hypre_SStructStencil ***stencils = hypre_SStructGraphStencils(graph);
HYPRE_Int nUventries = hypre_SStructGraphNUVEntries(graph);
HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph);
hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph);
HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid);
hypre_StructGrid *sgrid;
hypre_SStructStencil *stencil;
HYPRE_Int *split;
HYPRE_Int nvars;
HYPRE_Int nrows, rowstart, nnzs;
HYPRE_Int part, var, entry, b, m, mi;
HYPRE_Int *row_sizes;
HYPRE_Int max_row_size;
hypre_BoxArray *boxes;
hypre_Box *box;
hypre_Box *ghost_box;
hypre_IndexRef start;
hypre_Index loop_size, stride;
HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
#ifdef HYPRE_USING_OPENMP
HYPRE_IJMatrixSetOMPFlag(ijmatrix, 1); /* Use OpenMP */
#endif
if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
{
rowstart = hypre_SStructGridGhstartRank(grid);
nrows = hypre_SStructGridGhlocalSize(grid);
}
else /* matrix_type == HYPRE_PARCSR */
{
rowstart = hypre_SStructGridStartRank(grid);
nrows = hypre_SStructGridLocalSize(grid);
}
/* set row sizes */
m = 0;
max_row_size = 0;
ghost_box = hypre_BoxCreate(ndim);
row_sizes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
hypre_SetIndex(stride, 1);
for (part = 0; part < nparts; part++)
{
nvars = hypre_SStructPGridNVars(pgrids[part]);
for (var = 0; var < nvars; var++)
{
sgrid = hypre_SStructPGridSGrid(pgrids[part], var);
stencil = stencils[part][var];
split = hypre_SStructMatrixSplit(matrix, part, var);
nnzs = 0;
for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++)
{
if (split[entry] == -1)
{
nnzs++;
}
}
#if 0
/* TODO: For now, assume stencil is full/complete */
if (hypre_SStructMatrixSymmetric(matrix))
{
nnzs = 2*nnzs - 1;
}
#endif
boxes = hypre_StructGridBoxes(sgrid);
hypre_ForBoxI(b, boxes)
{
box = hypre_BoxArrayBox(boxes, b);
hypre_CopyBox(box, ghost_box);
if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
{
hypre_BoxGrowByArray(ghost_box, hypre_StructGridNumGhost(sgrid));
}
start = hypre_BoxIMin(box);
hypre_BoxGetSize(box, loop_size);
zypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size,
ghost_box, start, stride, mi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop1For(mi)
{
row_sizes[m+mi] = nnzs;
}
zypre_BoxLoop1End(mi);
m += hypre_BoxVolume(ghost_box);
}
max_row_size = hypre_max(max_row_size, nnzs);
if (nvneighbors[part][var])
{
max_row_size =
hypre_max(max_row_size, hypre_SStructStencilSize(stencil));
}
}
}
hypre_BoxDestroy(ghost_box);
/* GEC0902 essentially for each UVentry we figure out how many extra columns
* we need to add to the rowsizes */
/* RDF: THREAD? */
for (entry = 0; entry < nUventries; entry++)
{
mi = iUventries[entry];
m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart;
if ((m > -1) && (m < nrows))
{
row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]);
max_row_size = hypre_max(max_row_size, row_sizes[m]);
}
}
/* ZTODO: Update row_sizes based on neighbor off-part couplings */
HYPRE_IJMatrixSetRowSizes (ijmatrix, (const HYPRE_Int *) row_sizes);
hypre_TFree(row_sizes, HYPRE_MEMORY_HOST);
hypre_SStructMatrixTmpColCoords(matrix) =
hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_HOST);
hypre_SStructMatrixTmpCoeffs(matrix) =
hypre_CTAlloc(HYPRE_Complex, max_row_size, HYPRE_MEMORY_HOST);
HYPRE_IJMatrixInitialize(ijmatrix);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager - here we need to check the
* neighbor box manager also
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix,
HYPRE_Int part,
hypre_Index index,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph);
hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_Index *shape = hypre_SStructStencilShape(stencil);
HYPRE_Int size = hypre_SStructStencilSize(stencil);
hypre_IndexRef offset;
hypre_Index to_index;
hypre_SStructUVEntry *Uventry;
hypre_BoxManEntry *boxman_entry;
hypre_SStructBoxManInfo *entry_info;
HYPRE_BigInt row_coord;
HYPRE_BigInt *col_coords;
HYPRE_Int ncoeffs;
HYPRE_Complex *coeffs;
HYPRE_Int i, entry;
HYPRE_BigInt Uverank;
HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix);
hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry);
/* if not local, check neighbors */
if (boxman_entry == NULL)
hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry);
if (boxman_entry == NULL)
{
hypre_error_in_arg(1);
hypre_error_in_arg(2);
hypre_error_in_arg(3);
return hypre_error_flag;
}
else
{
hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info);
}
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index,
&row_coord, matrix_type);
col_coords = hypre_SStructMatrixTmpColCoords(matrix);
coeffs = hypre_SStructMatrixTmpCoeffs(matrix);
ncoeffs = 0;
for (i = 0; i < nentries; i++)
{
entry = entries[i];
if (entry < size)
{
/* stencil entries */
offset = shape[entry];
hypre_AddIndexes(index, offset, ndim, to_index);
hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry],
&boxman_entry);
/* if not local, check neighbors */
if (boxman_entry == NULL)
hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index,
vars[entry], &boxman_entry);
if (boxman_entry != NULL)
{
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index,
&col_coords[ncoeffs],matrix_type);
coeffs[ncoeffs] = values[i];
ncoeffs++;
}
}
else
{
/* non-stencil entries */
entry -= size;
hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank);
if (Uverank > -1)
{
Uventry = hypre_SStructGraphUVEntry(graph, Uverank);
col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry);
coeffs[ncoeffs] = values[i];
ncoeffs++;
}
}
}
if (action > 0)
{
HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord,
(const HYPRE_BigInt *) col_coords,
(const HYPRE_Complex *) coeffs);
}
else if (action > -1)
{
HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord,
(const HYPRE_BigInt *) col_coords,
(const HYPRE_Complex *) coeffs);
}
else
{
HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord,
col_coords, values);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Note: Entries must all be of type stencil or non-stencil, but not both.
*
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager- here we need to check the
* neighbor box manager also
*
* To illustrate what is computed below before calling IJSetValues2(), consider
* the following example of a 5-pt stencil (c,w,e,s,n) on a 3x2 grid (the 'x' in
* arrays 'cols' and 'ijvalues' indicates "no data"):
*
*   nrows       = 6
*   ncols       = 3 4 3 3 4 3
*   rows        = 0 1 2 3 4 5
*   row_indexes = 0 5 10 15 20 25
*   cols        = . . . x x  . . . . x  . . . x x  . . . x x  . . . . x  . . . x x
*   ijvalues    = . . . x x  . . . . x  . . . x x  . . . x x  . . . . x  . . . x x
*   entry       = c e n      c w e n    c w n      c e s      c w e s    c w s
*--------------------------------------------------------------------------*/
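/* For illustration, the gapped layout above amounts to: each row mi owns the
 * segment [mi*nentries, (mi+1)*nentries) of 'cols'/'ijvalues',
 *
 *    row_indexes[mi] = mi*nentries;               (segment start)
 *    cols[row_indexes[mi] + ncols[mi]++] = ...;   (append a valid column)
 *
 * so ncols[mi] counts the valid prefix and IJSetValues2() skips the 'x'
 * slots. */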
HYPRE_Int
hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix,
HYPRE_Int part,
hypre_Box *set_box,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
hypre_Box *value_box,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph);
hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
HYPRE_Int *vars = hypre_SStructStencilVars(stencil);
hypre_Index *shape = hypre_SStructStencilShape(stencil);
HYPRE_Int size = hypre_SStructStencilSize(stencil);
hypre_IndexRef offset;
hypre_BoxManEntry **boxman_entries;
HYPRE_Int nboxman_entries;
hypre_BoxManEntry **boxman_to_entries;
HYPRE_Int nboxman_to_entries;
HYPRE_Int nrows;
HYPRE_Int *ncols, *row_indexes;
HYPRE_BigInt *rows, *cols;
HYPRE_Complex *ijvalues;
hypre_Box *box;
hypre_Box *to_box;
hypre_Box *map_box;
hypre_Box *int_box;
hypre_Index index, stride, loop_size;
hypre_IndexRef start;
hypre_Index rs, cs;
HYPRE_BigInt row_base, col_base;
HYPRE_Int ei, entry, ii, jj, i;
HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix);
box = hypre_BoxCreate(ndim);
/*------------------------------------------
* all stencil entries
*------------------------------------------*/
if (entries[0] < size)
{
to_box = hypre_BoxCreate(ndim);
map_box = hypre_BoxCreate(ndim);
int_box = hypre_BoxCreate(ndim);
nrows = hypre_BoxVolume(set_box);
ncols = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_SHARED);
rows = hypre_CTAlloc(HYPRE_BigInt, nrows, HYPRE_MEMORY_SHARED);
row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_SHARED);
cols = hypre_CTAlloc(HYPRE_BigInt, nrows*nentries, HYPRE_MEMORY_SHARED);
ijvalues = hypre_CTAlloc(HYPRE_Complex, nrows*nentries, HYPRE_MEMORY_SHARED);
hypre_SetIndex(stride, 1);
hypre_SStructGridIntersect(grid, part, var, set_box, -1,
&boxman_entries, &nboxman_entries);
for (ii = 0; ii < nboxman_entries; ii++)
{
hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type);
hypre_CopyBox(set_box, box);
hypre_BoxManEntryGetExtents(boxman_entries[ii],
hypre_BoxIMin(map_box), hypre_BoxIMax(map_box));
hypre_IntersectBoxes(box, map_box, int_box);
hypre_CopyBox(int_box, box);
/* For each index in 'box', compute a row of length <= nentries and
* insert it into an nentries-length segment of 'cols' and 'ijvalues'.
* This may result in gaps, but IJSetValues2() is designed for that. */
nrows = hypre_BoxVolume(box);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < nrows; i++)
{
ncols[i] = 0;
row_indexes[i] = i*nentries;
}
for (ei = 0; ei < nentries; ei++)
{
entry = entries[ei];
hypre_CopyBox(box, to_box);
offset = shape[entry];
hypre_BoxShiftPos(to_box, offset);
hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1,
&boxman_to_entries, &nboxman_to_entries);
for (jj = 0; jj < nboxman_to_entries; jj++)
{
hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj], cs, matrix_type);
hypre_BoxManEntryGetExtents(boxman_to_entries[jj],
hypre_BoxIMin(map_box), hypre_BoxIMax(map_box));
hypre_IntersectBoxes(to_box, map_box, int_box);
hypre_CopyIndex(hypre_BoxIMin(int_box), index);
hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj],
index, &col_base, matrix_type);
hypre_BoxShiftNeg(int_box, offset);
hypre_CopyIndex(hypre_BoxIMin(int_box), index);
hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii],
index, &row_base, matrix_type);
start = hypre_BoxIMin(int_box);
hypre_BoxGetSize(int_box, loop_size);
/*FIXME: Currently works only for the default boxloop (see GetIndex below) */
zypre_BoxLoop2Begin(ndim, loop_size,
box, start, stride, mi,
value_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop2For(mi, vi)
{
hypre_Index index;
HYPRE_Int d, ci;
hypre_BoxLoopGetIndex(index); /* FIXME (see comment above) */
ci = mi*nentries + ncols[mi];
rows[mi] = row_base;
cols[ci] = col_base;
for (d = 0; d < ndim; d++)
{
rows[mi] += index[d]*rs[d];
cols[ci] += index[d]*cs[d];
}
ijvalues[ci] = values[ei + vi*nentries];
ncols[mi]++;
}
zypre_BoxLoop2End(mi, vi);
} /* end loop through boxman to entries */
hypre_TFree(boxman_to_entries, HYPRE_MEMORY_HOST);
} /* end of ei nentries loop */
if (action > 0)
{
HYPRE_IJMatrixAddToValues2(ijmatrix, nrows, ncols,
(const HYPRE_BigInt *) rows,
(const HYPRE_Int *) row_indexes,
(const HYPRE_BigInt *) cols,
(const HYPRE_Complex *) ijvalues);
}
else if (action > -1)
{
HYPRE_IJMatrixSetValues2(ijmatrix, nrows, ncols,
(const HYPRE_BigInt *) rows,
(const HYPRE_Int *) row_indexes,
(const HYPRE_BigInt *) cols,
(const HYPRE_Complex *) ijvalues);
}
else
{
HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values);
}
} /* end loop through boxman entries */
hypre_TFree(boxman_entries, HYPRE_MEMORY_HOST);
hypre_TFree(ncols, HYPRE_MEMORY_SHARED);
hypre_TFree(rows, HYPRE_MEMORY_SHARED);
hypre_TFree(row_indexes, HYPRE_MEMORY_SHARED);
hypre_TFree(cols, HYPRE_MEMORY_SHARED);
hypre_TFree(ijvalues, HYPRE_MEMORY_SHARED);
hypre_BoxDestroy(to_box);
hypre_BoxDestroy(map_box);
hypre_BoxDestroy(int_box);
}
/*------------------------------------------
* non-stencil entries
*------------------------------------------*/
else
{
/* RDF: THREAD (Check safety on UMatrixSetValues call) */
hypre_BoxGetSize(set_box, loop_size);
hypre_SerialBoxLoop0Begin(ndim, loop_size);
{
hypre_BoxLoopGetIndex(index);
hypre_AddIndexes(index, hypre_BoxIMin(set_box), ndim, index);
hypre_SStructUMatrixSetValues(matrix, part, index, var,
nentries, entries, values, action);
values += nentries;
}
hypre_SerialBoxLoop0End();
}
hypre_BoxDestroy(box);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix )
{
HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
HYPRE_IJMatrixAssemble(ijmatrix);
HYPRE_IJMatrixGetObject(
ijmatrix, (void **) &hypre_SStructMatrixParCSRMatrix(matrix));
return hypre_error_flag;
}
/*==========================================================================
* SStructMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixRef( hypre_SStructMatrix *matrix,
hypre_SStructMatrix **matrix_ref )
{
hypre_SStructMatrixRefCount(matrix) ++;
*matrix_ref = matrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix,
HYPRE_Int part,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Int *nSentries_ptr,
HYPRE_Int **Sentries_ptr,
HYPRE_Int *nUentries_ptr,
HYPRE_Int **Uentries_ptr )
{
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
HYPRE_Int *split = hypre_SStructMatrixSplit(matrix, part, var);
hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
HYPRE_Int entry;
HYPRE_Int i;
HYPRE_Int nSentries = 0;
HYPRE_Int *Sentries = hypre_SStructMatrixSEntries(matrix);
HYPRE_Int nUentries = 0;
HYPRE_Int *Uentries = hypre_SStructMatrixUEntries(matrix);
for (i = 0; i < nentries; i++)
{
entry = entries[i];
if (entry < hypre_SStructStencilSize(stencil))
{
/* stencil entries */
if (split[entry] > -1)
{
Sentries[nSentries] = split[entry];
nSentries++;
}
else
{
Uentries[nUentries] = entry;
nUentries++;
}
}
else
{
/* non-stencil entries */
Uentries[nUentries] = entry;
nUentries++;
}
}
*nSentries_ptr = nSentries;
*Sentries_ptr = Sentries;
*nUentries_ptr = nUentries;
*Uentries_ptr = Uentries;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
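/* Through the public interface these cases correspond to
 * HYPRE_SStructMatrixAddToValues() (action > 0),
 * HYPRE_SStructMatrixSetValues() (action = 0), and
 * HYPRE_SStructMatrixGetValues() (action < 0). */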
HYPRE_Int
hypre_SStructMatrixSetValues( HYPRE_SStructMatrix matrix,
HYPRE_Int part,
HYPRE_Int *index,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid);
HYPRE_Int *Sentries;
HYPRE_Int *Uentries;
HYPRE_Int nSentries;
HYPRE_Int nUentries;
hypre_SStructPMatrix *pmatrix;
hypre_Index cindex;
hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
&nSentries, &Sentries,
&nUentries, &Uentries);
hypre_CopyToCleanIndex(index, ndim, cindex);
/* S-matrix */
if (nSentries > 0)
{
pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
hypre_SStructPMatrixSetValues(pmatrix, cindex, var,
nSentries, Sentries, values, action);
/* put inter-part couplings in UMatrix and zero them out in PMatrix
* (possibly in ghost zones) */
if (nvneighbors[part][var] > 0)
{
hypre_Box *set_box;
HYPRE_Int d;
/* This creates boxes with zeroed-out extents */
set_box = hypre_BoxCreate(ndim);
for (d = 0; d < ndim; d++)
{
hypre_BoxIMinD(set_box, d) = cindex[d];
hypre_BoxIMaxD(set_box, d) = cindex[d];
}
hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries,
set_box, values, action);
hypre_BoxDestroy(set_box);
}
}
/* U-matrix */
if (nUentries > 0)
{
hypre_SStructUMatrixSetValues(matrix, part, cindex, var,
nUentries, Uentries, values, action);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix matrix,
HYPRE_Int part,
hypre_Box *set_box,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
hypre_Box *value_box,
HYPRE_Complex *values,
HYPRE_Int action )
{
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid);
HYPRE_Int *Sentries;
HYPRE_Int *Uentries;
HYPRE_Int nSentries;
HYPRE_Int nUentries;
hypre_SStructPMatrix *pmatrix;
hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
&nSentries, &Sentries,
&nUentries, &Uentries);
/* S-matrix */
if (nSentries > 0)
{
pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
hypre_SStructPMatrixSetBoxValues(pmatrix, set_box, var, nSentries, Sentries,
value_box, values, action);
/* put inter-part couplings in UMatrix and zero them out in PMatrix
* (possibly in ghost zones) */
if (nvneighbors[part][var] > 0)
{
hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries,
value_box, values, action);
}
}
/* U-matrix */
if (nUentries > 0)
{
hypre_SStructUMatrixSetBoxValues(matrix, part, set_box, var, nUentries, Uentries,
value_box, values, action);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in
* ghost zones). Assumes that all entries are stencil entries.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix matrix,
HYPRE_Int part,
hypre_Box *set_box,
HYPRE_Int var,
HYPRE_Int nentries,
HYPRE_Int *entries,
hypre_Box *value_box,
HYPRE_Complex *values,
HYPRE_Int action )
{
HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix);
hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix);
hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph);
hypre_SStructPMatrix *pmatrix;
hypre_SStructPGrid *pgrid;
hypre_SStructStencil *stencil;
hypre_Index *shape;
HYPRE_Int *smap;
HYPRE_Int *vars, frvartype, tovartype;
hypre_StructMatrix *smatrix;
hypre_Box *box, *ibox0, *ibox1, *tobox, *frbox;
hypre_Index stride, loop_size;
hypre_IndexRef offset, start;
hypre_BoxManEntry **frentries, **toentries;
hypre_SStructBoxManInfo *frinfo, *toinfo;
HYPRE_Complex *tvalues = NULL;
HYPRE_Int nfrentries, ntoentries, frpart, topart;
HYPRE_Int entry, sentry, ei, fri, toi;
pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
pgrid = hypre_SStructPMatrixPGrid(pmatrix);
frvartype = hypre_SStructPGridVarType(pgrid, var);
box = hypre_BoxCreate(ndim);
ibox0 = hypre_BoxCreate(ndim);
ibox1 = hypre_BoxCreate(ndim);
tobox = hypre_BoxCreate(ndim);
frbox = hypre_BoxCreate(ndim);
stencil = hypre_SStructPMatrixStencil(pmatrix, var);
smap = hypre_SStructPMatrixSMap(pmatrix, var);
shape = hypre_SStructStencilShape(stencil);
vars = hypre_SStructStencilVars(stencil);
hypre_SetIndex(stride, 1);
for (ei = 0; ei < nentries; ei++)
{
entry = entries[ei];
sentry = smap[entry];
offset = shape[entry];
smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]);
tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]);
/* shift box in the stencil offset direction */
hypre_CopyBox(set_box, box);
hypre_AddIndexes(hypre_BoxIMin(box), offset, ndim, hypre_BoxIMin(box));
hypre_AddIndexes(hypre_BoxIMax(box), offset, ndim, hypre_BoxIMax(box));
/* get "to" entries */
hypre_SStructGridIntersect(grid, part, vars[entry], box, -1,
&toentries, &ntoentries);
for (toi = 0; toi < ntoentries; toi++)
{
hypre_BoxManEntryGetExtents(
toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox));
hypre_IntersectBoxes(box, tobox, ibox0);
if (hypre_BoxVolume(ibox0))
{
hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart);
/* shift ibox0 back */
hypre_SubtractIndexes(hypre_BoxIMin(ibox0), offset, ndim,
hypre_BoxIMin(ibox0));
hypre_SubtractIndexes(hypre_BoxIMax(ibox0), offset, ndim,
hypre_BoxIMax(ibox0));
/* get "from" entries */
hypre_SStructGridIntersect(grid, part, var, ibox0, -1,
&frentries, &nfrentries);
for (fri = 0; fri < nfrentries; fri++)
{
/* don't set couplings within the same part unless possibly for
* cell data (to simplify periodic conditions for users) */
hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart);
if (topart == frpart)
{
if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) ||
(tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) )
{
continue;
}
hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo);
hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo);
if ( hypre_SStructBoxManInfoType(frinfo) ==
hypre_SStructBoxManInfoType(toinfo) )
{
continue;
}
}
hypre_BoxManEntryGetExtents(
frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox));
hypre_IntersectBoxes(ibox0, frbox, ibox1);
if (hypre_BoxVolume(ibox1))
{
tvalues = hypre_TReAlloc(tvalues, HYPRE_Complex, hypre_BoxVolume(ibox1), HYPRE_MEMORY_SHARED);
if (action >= 0)
{
/* set or add */
/* copy values into tvalues */
start = hypre_BoxIMin(ibox1);
hypre_BoxGetSize(ibox1, loop_size);
zypre_BoxLoop2Begin(ndim, loop_size,
ibox1, start, stride, mi,
value_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop2For(mi, vi)
{
tvalues[mi] = values[ei + vi*nentries];
}
zypre_BoxLoop2End(mi, vi);
/* put values into UMatrix */
hypre_SStructUMatrixSetBoxValues(
matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action);
/* zero out values in PMatrix (possibly in ghost) */
hypre_StructMatrixClearBoxValues(
smatrix, ibox1, 1, &sentry, -1, 1);
}
else
{
/* get */
/* get values from UMatrix */
hypre_SStructUMatrixSetBoxValues(
matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action);
/* copy tvalues into values */
start = hypre_BoxIMin(ibox1);
hypre_BoxGetSize(ibox1, loop_size);
zypre_BoxLoop2Begin(ndim, loop_size,
ibox1, start, stride, mi,
value_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
zypre_BoxLoop2For(mi, vi)
{
values[ei + vi*nentries] = tvalues[mi];
}
zypre_BoxLoop2End(mi, vi);
} /* end if action */
} /* end if nonzero ibox1 */
} /* end of "from" boxman entries loop */
hypre_TFree(frentries, HYPRE_MEMORY_HOST);
} /* end if nonzero ibox0 */
} /* end of "to" boxman entries loop */
hypre_TFree(toentries, HYPRE_MEMORY_HOST);
} /* end of entries loop */
hypre_BoxDestroy(box);
hypre_BoxDestroy(ibox0);
hypre_BoxDestroy(ibox1);
hypre_BoxDestroy(tobox);
hypre_BoxDestroy(frbox);
hypre_TFree(tvalues, HYPRE_MEMORY_SHARED);
return hypre_error_flag;
}
|
tsynch.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
long result=0, result_even=0, result_odd=0;
void foo() {
#pragma omp parallel // reduction(+:result)
{
for (long i = 0; i < 10; i++) {
if (i % 2) {
/* odd i: the named critical section serializes updates to result_odd */
#pragma omp critical(odd)
result_odd++;
}
else {
/* even i */
#pragma omp critical(even)
result_even++;
}
}
#pragma omp barrier
printf("Values for even and odd are %ld and %ld, respectively\n", result_even, result_odd);
#pragma omp critical
result += (result_even + result_odd);
#pragma omp atomic
result++;
#pragma omp flush(result)
#pragma omp barrier
printf("result = %ld\n", result);
#pragma omp barrier
printf("To double check ... result = %ld\n", result);
}
}
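/* A minimal sketch of the reduction alternative hinted at by the commented-out
 * clause in foo() above. Unlike foo(), where every thread executes the whole
 * loop, here the iterations are split across the team and OpenMP combines the
 * thread-private copies of the counters, so no critical sections are needed.
 * (foo_reduction is an illustrative addition, not part of the original test.) */
void foo_reduction() {
long even = 0, odd = 0;
#pragma omp parallel for reduction(+:even, odd)
for (long i = 0; i < 10; i++) {
if (i % 2) odd++;
else even++;
}
printf("Values for even and odd are %ld and %ld, respectively\n", even, odd);
}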
int main(int argc, char *argv[]) {
foo();
return 0;
}
|
GB_binop__le_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__le_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__le_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint16)
// A*D function (colscale): GB (_AxD__le_uint16)
// D*A function (rowscale): GB (_DxB__le_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint16)
// C=scalar+B GB (_bind1st__le_uint16)
// C=scalar+B' GB (_bind1st_tran__le_uint16)
// C=A+scalar GB (_bind2nd__le_uint16)
// C=A'+scalar GB (_bind2nd_tran__le_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
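// e.g. aij = 3 and bij = 7 give cij = (3 <= 7) = true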
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
 0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
 0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_UINT16 || GxB_NO_LE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__le_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fw.c | /*
Standard implementation of the Floyd-Warshall Algorithm
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "util.h"
#include <omp.h>
#include <immintrin.h>
#include <emmintrin.h>
static inline int min(int a, int b);
int main(int argc, char **argv)
{
int **A;
int i,j,k;
struct timeval t1, t2;
double time;
int N=1024;
if (argc != 2) {
fprintf(stderr, "Usage: %s N\n", argv[0]);
exit(1);
}
N=atoi(argv[1]);
// t=atoi(argv[2]);
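/* the aligned SSE loads/stores below require 16-byte aligned rows; glibc
 * malloc on x86-64 provides this, but posix_memalign (or the unaligned
 * _mm_loadu_si128/_mm_storeu_si128) would be the portable choice */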
A = (int **) malloc(N*sizeof(int *));
for(i=0; i<N; i++) A[i] = (int *) malloc(N*sizeof(int));
graph_init_random(A,-1,N,128*N);
gettimeofday(&t1,0);
__m128i *p;
//__m128i *p1;
__m128i comp,Aij,Akj,Aik,Ai1k,Ai2k,Ai3k,Mask;
/* assumes N is a multiple of 4 (rows are processed in blocks of 4) */
for(k=0;k<N;k++) {
/* every per-iteration temporary must be private or the threads race on it;
 * the j loop stays serial -- a second "parallel for" at this level would
 * only request nested parallelism, which is disabled by default */
#pragma omp parallel for private(i,j,p,comp,Aij,Akj,Aik,Ai1k,Ai2k,Ai3k)
for(i=0; i< N ; i+=4) {
Aik=_mm_set1_epi32(A[i][k]);
Ai1k=_mm_set1_epi32(A[i+1][k]);
Ai2k=_mm_set1_epi32(A[i+2][k]);
Ai3k=_mm_set1_epi32(A[i+3][k]);
for ( j=0 ; j<N ; j+=4) {
Akj=_mm_load_si128((__m128i*) &A[k][j]);
if(i!=k) {
p=(__m128i*)(&A[i][j]);
Aij=_mm_load_si128(p);
comp =_mm_add_epi32(Aik,Akj);
Aij =_mm_min_epi32(Aij,comp);
//Mask=_mm_cmplt_epi32(Aij,comp);
//Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) );
_mm_store_si128(p,Aij);
}
p=(__m128i*)(&A[i+1][j]);
Aij=_mm_load_si128(p);
comp =_mm_add_epi32(Ai1k,Akj);
Aij =_mm_min_epi32(Aij,comp);
//Mask=_mm_cmplt_epi32(Aij,comp);
//Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) );
_mm_store_si128(p,Aij);
p=(__m128i*)(&A[i+2][j]);
Aij=_mm_load_si128(p);
comp =_mm_add_epi32(Ai2k,Akj);
Aij =_mm_min_epi32(Aij,comp);
//Mask=_mm_cmplt_epi32(Aij,comp);
//Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) );
_mm_store_si128(p,Aij);
p=(__m128i*)(&A[i+3][j]);
Aij=_mm_load_si128(p);
comp =_mm_add_epi32(Ai3k,Akj);
Aij =_mm_min_epi32(Aij,comp);
//Mask=_mm_cmplt_epi32(Aij,comp);
//Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) );
_mm_store_si128(p,Aij);
}
}
}
/*
Scalar reference implementation:
for(k=0; k<N; k++)
for(i=0; i<N; i++)
for(j=0; j<N; j++)
A[i][j] = min(A[i][j], A[i][k]+A[k][j]);
*/
gettimeofday(&t2,0);
time=(double)((t2.tv_sec-t1.tv_sec)*1000000+t2.tv_usec-t1.tv_usec)/1000000;
printf("FW,%d,%.4f\n", N, time);
/*
for(i=0; i<N; i++)
for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]);
*/
return 0;
}
static inline int min(int a, int b)
{
if(a<=b)return a;
else return b;
}
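/* Build sketch (util.h/util.c assumed to provide graph_init_random):
 *    gcc -O2 -fopenmp -msse4.1 fw.c util.c -o fw
 * (-msse4.1 because _mm_min_epi32 is an SSE4.1 intrinsic) */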
|
log.h | /**
* Copyright (c) 2017 rxi
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the MIT license. See `log.c` for details.
*/
#pragma once
#ifdef USE_LOG
#include <stdio.h>
#include <stdarg.h>
#include <cstring>
#include <string>
#define LOG_VERSION "0.1.0"
typedef void (*log_LockFn)(void *udata, int lock);
enum {
LOG_TRACE, LOG_DEBUG, LOG_INFO, LOG_WARN, LOG_ERROR, LOG_FATAL
};
#define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define RAPIDS_FILE (__FILENAME__)
#define log_trace(...) log_log(LOG_TRACE, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_debug(...) log_log(LOG_DEBUG, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_info(...) log_log(LOG_INFO, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_warn(...) log_log(LOG_WARN, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_error(...) log_log(LOG_ERROR, RAPIDS_FILE, __LINE__, __VA_ARGS__)
#define log_fatal(...) log_log(LOG_FATAL, RAPIDS_FILE, __LINE__, __VA_ARGS__)
void log_set_udata(void *udata);
void log_set_lock(log_LockFn fn);
void log_set_fp(FILE *fp);
void log_set_level(int level);
void log_set_quiet(int enable);
void log_log(int level, const char *file, int line, const char *fmt, ...);
inline void print_str(std::string str) {
#ifdef DEBUG
static thread_local bool is_first = true;
if (is_first) {
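/* note: "omp single" ends with an implicit barrier, so every thread of the
 * enclosing parallel region must reach this call or the program deadlocks */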
#pragma omp single
log_info("%s", str.c_str());
is_first = false;
}
#endif
}
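/* Minimal usage sketch (log_log and the setters are defined in log.c):
 *    log_set_level(LOG_INFO);
 *    log_info("loaded %d items", n);
 */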
#else /* !USE_LOG: logging macros compile to nothing */
#define log_trace(...)
#define log_debug(...)
#define log_info(...)
#define log_warn(...)
#define log_error(...)
#define log_fatal(...)
#endif /* USE_LOG */ |
GB_assign_zombie2.c | //------------------------------------------------------------------------------
// GB_assign_zombie2: delete all entries in C(i,:) for GB_assign
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C(i,:)<!> = anything: GrB_Row_assign or GrB_Col_assign with an empty
// complemented mask requires all entries in C(i,:) to be deleted.
// C must be sparse or hypersparse.
// C->iso is not affected.
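// (A zombie is an entry that is logically deleted but still physically
// present: its row index i is stored as GB_FLIP (i), a negation-style
// encoding that is its own inverse, so zombies can later be pruned or
// revived.)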
#include "GB_assign.h"
#include "GB_assign_zombie.h"
void GB_assign_zombie2
(
GrB_Matrix C,
const int64_t i,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (!GB_JUMBLED (C)) ; // binary search is used
ASSERT (!GB_PENDING (C)) ;
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
const int64_t *restrict Cp = C->p ;
int64_t *restrict Ci = C->i ;
const int64_t Cnvec = C->nvec ;
int64_t nzombies = C->nzombies ;
const int64_t zorig = nzombies ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
//--------------------------------------------------------------------------
// C(i,:) = empty
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t kfirst, klast ;
GB_PARTITION (kfirst, klast, Cnvec, taskid, ntasks) ;
for (int64_t k = kfirst ; k < klast ; k++)
{
//------------------------------------------------------------------
// find C(i,j)
//------------------------------------------------------------------
int64_t pC = Cp [k] ;
int64_t pC_end = Cp [k+1] ;
int64_t pright = pC_end - 1 ;
bool found, is_zombie ;
GB_BINARY_SEARCH_ZOMBIE (i, Ci, pC, pright, found, zorig,
is_zombie) ;
//------------------------------------------------------------------
// if found and not a zombie, mark it as a zombie
//------------------------------------------------------------------
if (found && !is_zombie)
{
ASSERT (i == Ci [pC]) ;
nzombies++ ;
Ci [pC] = GB_FLIP (i) ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
C->nzombies = nzombies ;
}
|
heterogeneous.c | #include <stdio.h>
#include <omp.h>
#include "mpi.h"
int main(int argc, char **argv) {
int thread_safety;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &thread_safety);
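/* thread_safety reports the level MPI actually provides; no MPI calls are
 * made from inside the parallel region, so any level suffices here */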
int size, rank;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#pragma omp parallel
{
int tid = omp_get_thread_num();
int num_threads = omp_get_num_threads();
printf("Hello from rank %d running omp thread %d/%d\n", rank, tid, num_threads);
}
MPI_Finalize(); /* was missing: every MPI program must finalize */
return 0;
}
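/* Build/run sketch:
 *    mpicc -fopenmp heterogeneous.c -o hetero
 *    OMP_NUM_THREADS=4 mpirun -np 2 ./hetero
 * prints one line per (rank, thread) pair */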
|
GB_binop__pow_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint8)
// C=scalar+B GB (_bind1st__pow_uint8)
// C=scalar+B' GB (_bind1st_tran__pow_uint8)
// C=A+scalar GB (_bind2nd__pow_uint8)
// C=A'+scalar GB (_bind2nd_tran__pow_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_pow_uint8 (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_uint8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pow_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pow_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pow_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__pow_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pow_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__pow_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_uint8 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_uint8 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint8 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint8 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mandel_omp_nox_dynamic_512.c | /* OpenMP Mandelbrot program (dynamic schedule, chunk size 512) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#define X_RESN 1000 /* x resolution */
#define Y_RESN 1000 /* y resolution */
#define MAX_ITER (2000)
// ref: https://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance
// call this function to start a nanosecond-resolution timer
struct timespec timer_start() {
struct timespec start_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
return start_time;
}
// call this function to end a timer, returning nanoseconds elapsed as a long
long timer_end(struct timespec start_time){
struct timespec end_time;
clock_gettime(CLOCK_MONOTONIC, &end_time);
long diffInNanos = (end_time.tv_sec - start_time.tv_sec) * (long)1e9 + (end_time.tv_nsec - start_time.tv_nsec);
return diffInNanos;
}
typedef struct complextype {
double real, imag;
} Compl;
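/*
* Illustrative helper (an addition; it is not called anywhere in this
* benchmark): dump the per-pixel iteration counts as an 8-bit binary PGM
* image. It assumes the row-major layout of ks produced by the loop in
* main(); the function name and output format are illustration only.
*/
static void write_pgm(const char *path, const int *ks)
{
FILE *f = fopen(path, "wb");
if (f == NULL) return;
fprintf(f, "P5\n%d %d\n255\n", Y_RESN, X_RESN); /* width, height, maxval */
for (int it = 0; it < X_RESN * Y_RESN; it++)
fputc((int)(255.0 * ks[it] / MAX_ITER), f); /* scale count to 0..255 */
fclose(f);
}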
int main(int argc, char *argv[])
{
struct timespec vartime = timer_start();
/* Mandelbrot variables */
int *ks;
ks = (int *)malloc((X_RESN*Y_RESN) * sizeof(int));
double *ds;
ds = (double *)malloc((X_RESN*Y_RESN) * sizeof(double));
/* Calculate and draw points */
#pragma omp parallel default(shared)
{
int num_threads = omp_get_num_threads();
// printf("num_threads = %d\n", num_threads);
#pragma omp for schedule(dynamic, 512)
for (int it = 0; it < X_RESN*Y_RESN; it++)
{
int i = it / Y_RESN;
int j = it % Y_RESN;
// mandelbrot set is defined in the region of x = [-2, +2] and y = [-2, +2]
double u = ((double)i - (X_RESN / 2.0)) / (X_RESN / 4.0);
double v = ((double)j - (Y_RESN / 2.0)) / (Y_RESN / 4.0);
Compl z, c, t;
z.real = z.imag = 0.0;
c.real = v;
c.imag = u;
int k = 0;
double d = 0.0;
double lengthsq, temp;
do
{ /* iterate for pixel color */
t = z;
z.imag = 2.0 * t.real * t.imag + c.imag;
z.real = t.real * t.real - t.imag * t.imag + c.real;
lengthsq = z.real * z.real + z.imag * z.imag;
d += pow(pow(z.imag - t.imag, 2.0) + pow(z.real - t.real, 2.0), 0.5);
k++;
} while (lengthsq < 4.0 && k < MAX_ITER);
ks[it] = k;
ds[it] = d;
}
}
long time_elapsed_nanos = timer_end(vartime);
double elapsed = time_elapsed_nanos*0.000000001;
printf("%lf\n", elapsed);
/* Program Finished */
free(ks);
free(ds);
return 0;
}
|
bfs_omp.c | #include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include "bfs.h"
#include "bitmap.h"
#include "frontier.h"
#include "types.h"
bfs_result bfs_omp(csr *adj, u32 src) {
// Adjacency matrix must be a square matrix
assert(adj->m == adj->n);
frontier *f = frontier_new(adj->nz);
frontier *fnext = frontier_new(adj->nz);
bitmap *b = bitmap_new(adj->n);
bitmap *bnext = bitmap_new(adj->n);
// Start with top-down method
bool top_down = true;
u32 mf = csr_row_len(adj, src);
u32 nf = 1;
u32 mu = adj->nz;
// Scratch buffer for storing degrees of vertices in frontier
u32 *degree = malloc(sizeof(u32) * (adj->n + 1));
u32 *parent = malloc(sizeof(u32) * adj->n);
u32 *distance = calloc(adj->n, sizeof(u32));
degree[0] = 0;
distance[src] = 0;
parent[src] = src;
f->node[0] = src;
f->len = 1;
while (!(top_down && frontier_empty(f))) {
// Collect vertex degrees
if (top_down) {
/// Top-down traversal
#pragma omp parallel for
for (u32 i = 0; i < f->len; i += 1) {
degree[i + 1] = adj->r[f->node[i] + 1] - adj->r[f->node[i]];
}
// Run prefix sum over the degree array
prefix_sum(degree, f->len + 1);
fnext->len = degree[f->len];
#pragma omp parallel for schedule(guided) reduction(+: mf, nf) reduction(- : mu)
for (u32 i = 0; i < f->len; i += 1) {
u32 v = f->node[i], index = degree[i];
for (u32 j = 0; j < csr_row_len(adj, v); j += 1) {
/// \todo: Frequent cache miss here... Graph compression?
u32 next = adj->c[csr_row_begin(adj, v) + j];
if (!bitmap_test_set(b, next)) {
fnext->node[index + j] = next;
distance[next] = distance[v] + 1;
mf += csr_row_len(adj, next);
mu -= csr_row_len(adj, next);
nf += 1;
} else {
fnext->node[index + j] = SENTINEL;
}
}
}
frontier_cull(fnext, f);
} else {
/// Bottom-up traversal
bitmap_clear(bnext);
#pragma omp parallel for schedule(guided) reduction(+ : mf, nf) reduction(- : mu)
for (u32 v = 0; v < adj->n; v += 1) {
if (!bitmap_test(b, v)) {
for (u32 i = csr_row_begin(adj, v); i < csr_row_end(adj, v); i += 1) {
/// \todo: Frequent cache miss here too... 40% Global, 22% LLC...
if (bitmap_test(b, adj->c[i])) {
bitmap_set(bnext, v);
distance[v] = distance[adj->c[i]] + 1;
mf += csr_row_len(adj, v);
mu -= csr_row_len(adj, v);
nf += 1;
break;
}
}
}
}
bitmap_merge(b, bnext);
}
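// Direction-optimizing switch in the style of Beamer et al.: leave top-down
// once the frontier's outgoing edges (mf) exceed roughly 1/14 of the edges
// still unexplored (mu), and return to top-down once the frontier shrinks
// below roughly n/24 vertices; 14 and 24 are the usual alpha/beta tuning
// constants of that heuristic.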
if (top_down) {
if (mf > mu / 14) {
top_down = false;
}
} else {
if (nf < adj->n / 24) {
f->len = bitmap2array(bnext, f->node);
top_down = true;
}
}
mf = 0;
nf = 0;
}
frontier_free(f);
frontier_free(fnext);
bitmap_free(b);
bitmap_free(bnext);
free(degree);
return (bfs_result){parent, distance};
}
|
CPUMatrixTensorImpl.h | // Moved out of CPUMatrixImpl.h to prevent the compiler from crashing when it runs out of heap
#include "CPUMatrix.h"
#include "TensorOps.h"
namespace Microsoft { namespace MSR { namespace CNTK {
// =======================================================================
// TensorView support
// =======================================================================
// To save time, this makes extensive use of templates and macros.
// -----------------------------------------------------------------------
// function to compute the value for a given output location (perform reduction if needed)
// -----------------------------------------------------------------------
// perform loop over reduction index m
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m>
struct TensorOpReduction
{
// reduction case (non-reduction case is specialized)
static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
strides[i] = reducingStrides[i][(size_t) m];
double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
{
// advance the pointers
for (size_t i = 0; i < N - 1; i++)
pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
// need to descend into one loop deeper
aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides));
}
// It would be nicer to return double, but we keep ElemType so that tests don't return different numbers than the previous implementation.
return static_cast<ElemType>(aggregate);
}
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1>
{
static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& /*reductionOp*/,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&)
{
return opfn(pointers); // finally we are doing some work!!!
}
};
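// Worked example (illustrative): with reducingOpDims = { 4, 3 }, a call to
// TensorOpReduction<..., m = 1>::Loop() visits 3 slices along axis 1; each
// slice recurses into m = 0, which visits 4 elements along axis 0; m = -1
// finally applies opfn to the current pointer tuple. opfn thus runs 12
// times, and the 12 values are folded pairwise through reductionOp.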
// perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices.
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, size_t N, int m>
struct TensorArgOpReduction
{
static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
ElementWiseOperator reductionOp)
{
size_t counter = 0;
size_t index = 0;
ElemType val = (ElemType)0;
switch (reducingOpDims.size())
{
case 3:
val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 2:
val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 1:
val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 0:
val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size());
}
return make_pair(val, index);
}
// reduction case (non-reduction case is specialized)
static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
ElementWiseOperator reductionOp, size_t& counter, size_t& index)
{
array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
strides[i] = reducingStrides[i][(size_t)m];
ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
{
// advance the pointers
for (size_t i = 0; i < N - 1; i++)
pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
bool update = false;
switch (reductionOp)
{
case ElementWiseOperator::opArgmin:
update = (aggregate > val);
break;
case ElementWiseOperator::opArgmax:
update = (aggregate < val);
break;
}
if (update)
{
aggregate = val;
index = counter - 1;
}
}
return aggregate;
}
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, size_t N>
struct TensorArgOpReduction<ElemType, N, -1>
{
static inline ElemType Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator /*reductionOp*/, size_t& counter, size_t& /*index*/)
{
counter++;
return *pointers[0]; // finally we are doing some work!!!
}
};
// -----------------------------------------------------------------------
// perform loop over regular index k for N-nary operations (N counting the output)
// -----------------------------------------------------------------------
// perform loop over regular index k and reducing index m for N operands (counting the output)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k>
struct TensorOpIteration
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t) k];
for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE.
// This is a very common case, e.g. adding vectors or computing the Sigmoid.
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
ElemType* pc = pointers[2];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// TODO: According to Amit, the VS compiler is not able to vectorize through lambdas. Solution: change the lambda to take an N, or implement the loop inside it (with 1 element by default).
// TODO: The signedness of k (required for omp) causes an extra sign-extend.
// TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it?
}
};
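// Net effect of the three branches above, written out (illustrative):
// for each k: pc[k] = alpha * opfn(pa[k], pb[k]) + beta * pc[k]
// beta == 0 and alpha == 1 are peeled off as separate loops so the compiler
// can drop the extra multiply/add in the common case.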
// and unary
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
};
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1>
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// we are at element level for the result: perform the op (there may still be reduction)
ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
// scale
val *= alpha;
// combine with previous value in target matrix, then write it out
auto* pout = pointers.back();
if (beta != 0)
val += beta * *pout;
// save
*pout = val;
return;
}
};
// perform loop over regular index k and reducing index m for N operands (counting the output).
// The difference between TensorOpIteration and TensorArgOpIteration is that the latter stores
// the index of the result instead of the result itself. They are kept separate for performance.
template <class ElemType, size_t N, int k>
struct TensorArgOpIteration
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t)k];
for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
template <class ElemType, size_t N>
struct TensorArgOpIteration<ElemType, N, -1>
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// we are at element level for the result: perform the op (there may still be reduction)
auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp);
auto* pout = pointers.back();
*pout = (ElemType)val.second;
return;
}
};
// -----------------------------------------------------------------------
// map runtime parameters N to template parameters
// -----------------------------------------------------------------------
// tensor operation with k+1 dimensions (-1 means scalar)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k>
static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
size_t dims = reducingOpDims.size();
switch (dims)
{
case 2:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
{
// if the leading stride is 1 for all operands, we can let the compiler do some unrolling
bool leadingAllOne = true;
for (size_t i = 0; i < N; i++)
leadingAllOne &= k >= 0 && regularStrides[i][0] == 1;
if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
default:
LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims);
}
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different k.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
pointers[i] += offsets[i];
size_t dims = regularOpDims.size();
switch (dims)
{
// N.B. consider code size impact when adding more cases.
case 5:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 4>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 4:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 3:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 2:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims);
}
}
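// Dispatch example (illustrative): a fully flattenable elementwise op such
// as sigmoid over 8000 elements arrives with regularOpDims = {8000} and
// reducingOpDims = {}, so this function picks k = 0, TensorOpWithRegularLoop
// picks m = -1, and, if every leading stride is 1, execution lands in the
// vectorizable innermost-loop specialization above.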
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different reductionOps
template <class ElemType, typename OPFN, size_t N>
static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// BUGBUG: We always use 'double' as the aggregator type, even for ElemType==float. Reason: otherwise some e2e tests would fail, as historically we
// used double for the sum aggregator. But:
//  * for min and max reductions this is meaningless.
//  * It is not consistent with what we do on the GPU, where we aggregate in ElemType.
//  * It costs performance.
// TODO: adapt the e2e tests to run with an aggregator of type ElemType.
#define CaseTensorOpWithFnAndReduction(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \
{ \
return Op##oper(a, b); \
}, \
offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
switch (reductionOp)
{
CaseTensorOpWithFnAndReduction(Sum);
CaseTensorOpWithFnAndReduction(LogSum);
CaseTensorOpWithFnAndReduction(Min);
CaseTensorOpWithFnAndReduction(Max);
CaseTensorOpWithFnAndReduction(ElementwiseProduct);
default:
LogicError("Specified ElementWiseOperator op %d not supported as reduction operation.", (int)reductionOp);
}
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; also map op to a lambda
// -----------------------------------------------------------------------
// special tensor ops for inference speed
template <class ElemType>
bool CPUMatrixSpecialUnaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides);
template <class ElemType>
bool CPUMatrixSpecialBinaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 3>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides);
template <class ElemType>
bool CPUMatrixSpecialTernaryTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 4>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides);
// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum &&
reductionOp != ElementWiseOperator::opLogSum &&
reductionOp != ElementWiseOperator::opMin &&
reductionOp != ElementWiseOperator::opMax &&
reductionOp != ElementWiseOperator::opElementwiseProduct)
InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented.");
#ifdef USE_MKL
if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) &&
CPUMatrixSpecialUnaryTensorOpImpl(beta, a, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides))
return;
#endif
// TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize.
#define CaseUnaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \
{ \
return Op##oper((*(pp[0]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 2> pointers = {a.Data(), o.Data()};
switch (op)
{
ForAllUnaryOps(CaseUnaryTensorOp);
default:
LogicError("TensorOp: Unknown unary op code %d.", (int) op);
}
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 3>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum.");
#ifdef USE_MKL
if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) &&
CPUMatrixSpecialBinaryTensorOpImpl(beta, a, b, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides))
return;
#endif
#define CaseBinaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 3> pointers = {a.Data(), b.Data(), o.Data()};
switch (op)
{
ForAllBinaryOps(CaseBinaryTensorOp);
default:
LogicError("TensorOp: Unknown op binary code %d.", (int) op);
}
}
// perform ternary operation 'op' on a, b, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrixTensorOpImpl(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, CPUMatrix<ElemType>& o, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 4>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum.");
#ifdef USE_MKL
if (!!(CPUMatrix<ElemType>::GetOptimizationFlags() & CPUMatrix<ElemType>::OPT_EVAL_WITH_MKL) &&
CPUMatrixSpecialTernaryTensorOpImpl(beta, a, b, c, o, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides))
return;
#endif
#define CaseTernaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), o.Data()};
switch (op)
{
ForAllTernaryOps(CaseTernaryTensorOp);
default:
LogicError("TensorOp: Unknown ternary op code %d.", (int) op);
}
}
template <class ElemType>
void CPUMatrixTensorArgOpImpl(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& o, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opArgmin &&
reductionOp != ElementWiseOperator::opArgmax)
InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
if (o.GetNumElements() == 1)
{
o.Data()[0] = (ElemType) a.ArgOp(reductionOp);
}
else
{
const size_t N = 2;
array<ElemType*, N> pointers = { a.Data(), o.Data() };
for (size_t i = 0; i < N; i++)
pointers[i] += offsets[i];
switch (regularOpDims.size())
{
case 2:
TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 1:
TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 0:
TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size());
}
}
}
}}} |
GB_unop__sinh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fc64_fc64)
// op(A') function: GB (_unop_tran__sinh_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = csinh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csinh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = csinh (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FC64)
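// note (assumption, not stated in this file): GxB_FC64_t is GraphBLAS's C99
// double-complex type, so csinh below is the <complex.h> complex hyperbolic
// sine; the typedef itself lives in GB.h.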
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sinh_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = csinh (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = csinh (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sinh_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <math.h>
#include <string.h>
#include <timer.h>
static float printAccuracy(float, float);
extern void ddot_mpfr(int, const float*, const float*, float*);
extern void ddot_naive_scalar(int, const float*, const float*, float*);
extern void ddot_naive_vec(int, const float*, const float*, float*);
extern void ddot_kahan_scalar_intrin(int, const float*, const float*, float*);
extern void ddot_kahan_avx(int, const float*, const float*, float*);
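/*
* Minimal scalar sketch (illustration only, not part of the benchmark) of
* the Kahan-compensated dot product that the ddot_kahan_* kernels above are
* assumed to implement with intrinsics; the real kernels live in other
* translation units.
*/
static void ddot_kahan_sketch(int N, const float *x, const float *y, float *r)
{
float sum = 0.0f; /* running sum */
float c = 0.0f; /* compensation: carries the lost low-order bits */
for (int i = 0; i < N; ++i) {
float t = x[i] * y[i] - c; /* fold the correction into the next term */
float s = sum + t; /* low-order bits of t are lost here... */
c = (s - sum) - t; /* ...and recovered algebraically into c */
sum = s;
}
*r = sum;
}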
void benchmark(char *version, void(*func)(int, const float *, const float *, float *), int N, const float *x, const float *y) {
static float ref = 0.0f;
float result;
int i, j;
double runtime = 0.0f;
/* run benchmark and increase number of iterations until runtime > 0.1s */
for (i=1; runtime < 0.1f; i=i*2) {
TimerData tdata;
timer_start(&tdata);
for (j=0; j<i; ++j)
func(N, x, y, &result);
timer_stop(&tdata);
runtime = timer_print(&tdata);
}
/* 'i' was doubled one time too often */
i = i / 2;
/* first run should be MPFR to set reference value */
if (ref == 0.0f) {
if (strncmp(version, "MPFR", 4) != 0) {
printf("First version must be MPFR to set reference value!\n");
exit(EXIT_FAILURE);
}
ref = result;
}
/* output results */
printf("%s", version);
printf("Result %20.15f\t\t", result);
printf("Perf %f Elements/s\t",((double)N * (double)i)/runtime);
printf("Accuracy %g bits\t", printAccuracy(ref, result));
printf("%d iterations\n", i);
}
static void mem_allocate(
void** ptr,
int alignment,
uint64_t size)
{
int errorCode;
errorCode = posix_memalign(ptr, alignment, size);
if (errorCode)
{
if (errorCode == EINVAL)
{
fprintf(stderr,
"Alignment parameter is not a power of two\n");
exit(EXIT_FAILURE);
}
if (errorCode == ENOMEM)
{
fprintf(stderr,
"Insufficient memory to fulfill the request\n");
exit(EXIT_FAILURE);
}
}
if ((*ptr) == NULL)
{
fprintf(stderr, "posix_memalign failed!\n");
exit(EXIT_FAILURE);
}
}
static float printAccuracy(float ref, float result)
{
float acc;
if (fabs(ref-result) == 0)
{
acc = 23; /* exact match: all 23 explicit float mantissa bits agree */
}
else
{
/* approximate number of leading mantissa bits in which ref and result agree */
acc = log2(fabs(ref)) - log2(fabs(ref-result));
}
return acc;
}
static void init_benign(int N, float *x)
{
/* produce deterministic results: drand48() keeps hidden global state and is
not thread-safe, so this initialization has to stay serial (the original
"#pragma omp parallel for" here made the values run-to-run nondeterministic) */
srand48(1);
for (int i=0; i<N; ++i)
x[i] = drand48()/((float)i*(float)i+1.);
}
static void compare(float ref, float res) {
if (res == ref)
printf("[OK]\n");
else {
printf("[FAIL: Ref: %f Is: %f Ref-Is: %f]\n", ref, res, ref-res);
exit(EXIT_FAILURE);
}
}
static void verify_test(float *A, float *B, int N) {
float ref, res;
printf("mpfr\t"); ddot_mpfr(N, A, B, &ref); printf("%d\t", N);
printf("ddot_naive_scalar\t"); ddot_naive_scalar(N, A, B, &res); compare(ref, res);
printf("ddot_naive_vec\t"); ddot_naive_vec(N, A, B, &res); compare(ref, res);
printf("ddot_kahan_scalar_intrin\t"); ddot_kahan_scalar_intrin(N, A, B, &res); compare(ref, res);
printf("ddot_kahan_avx\t"); ddot_kahan_avx(N, A, B, &res); compare(ref, res);
printf("\n");
}
void verify(void) {
float *A, *B;
int N = 4096;
mem_allocate((void**) &A, 64, N * sizeof(float));
mem_allocate((void**) &B, 64, N * sizeof(float));
for (int i=0; i<4096; ++i) {
A[i] = 1.0f;
B[i] = 1.0f;
}
verify_test(A, B, 4096); /* power of two */
verify_test(A, B, 2); /* small value, smaller than AVX */
verify_test(A, B, 11); /* small value, larger than AVX */
verify_test(A, B, 269); /* medium value */
verify_test(A, B, 3571); /* prime */
for (int i=0; i<4096; ++i) {
A[i] = 1.0;
B[i] = 2.0;
}
verify_test(A, B, 2); /* small value, smaller than AVX */
verify_test(A, B, 11); /* small value, larger than AVX */
verify_test(A, B, 512); /* medium value */
verify_test(A, B, 3000); /* medium value */
}
void test_precision(int N, float *x, float *y) {
benchmark("MPFR\t\t\t", ddot_mpfr, N, x, y);
benchmark("Naive-scalar\t\t", ddot_naive_scalar, N, x, y);
benchmark("Naive-VEC\t\t", ddot_naive_vec, N, x, y);
benchmark("Kahan-scalar-intrin\t", ddot_kahan_scalar_intrin, N, x, y);
benchmark("Kahan-AVX\t\t", ddot_kahan_avx, N, x, y);
}
int main ( int argc, char * argv[] )
{
float* x;
float* y;
if (argc < 2) {
fprintf(stderr, "Usage: %s <N>|verify\n", argv[0]);
exit(EXIT_FAILURE);
}
if (strncmp("verify", argv[1], 6) == 0) {
verify();
exit(EXIT_SUCCESS);
}
int N = atoi(argv[1]);
timer_init();
printf("**********************************************************\n");
printf("Length %d\n",N);
mem_allocate((void**) &x, 64, N * sizeof(float));
mem_allocate((void**) &y, 64, N * sizeof(float));
init_benign(N, x);
init_benign(N, y);
test_precision(N, x, y);
}
|
dragonfly4_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See
* http://www.openwall.com/lists/john-dev/2012/01/16/1
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_dragonfly4_32;
extern struct fmt_main fmt_dragonfly4_64;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dragonfly4_32);
john_register_one(&fmt_dragonfly4_64);
#else
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
#define OMP_SCALE 256
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL_32 "dragonfly4-32"
#define FORMAT_LABEL_64 "dragonfly4-64"
#define FORMAT_NAME_32 "DragonFly BSD $4$ w/ bugs, 32-bit"
#define FORMAT_NAME_64 "DragonFly BSD $4$ w/ bugs, 64-bit"
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 84
#define BINARY_SIZE 64
#define BINARY_ALIGN 4
#define USED_BINARY_SIZE 62 // Due to base64 bug in DragonFly BSD crypt-sha512.c
#define SALT_SIZE_32 (1+4+8) // 1st char is length
#define SALT_SIZE_64 (1+8+8)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests_32[] = {
{"$4$7E48ul$K4u43llx1P184KZBoILl2hnFLBHj6.486TtxWA.EA1pLZuQS7P5k0LQqyEULux47.5vttDbSo/Cbpsez.AUI", "magnum"},
{"$4$Hz$5U1s18ntUYE24mF3JN44BYZPN34HBCMw57.Yw2JeKoiBkTVSGBDZEPT325hvR7iw8QYHy9kG7WUW8LCM.6UD", ""},
{"$4$W$79ddF.iDXVPcf/uf8bMFl15leilo1GE8C2KnEAWs3isK930rVy1EZZS2veHgU17NRt4qpKTtZRCA.QC7.68j", "password"},
{"$4$dw7uRHW$Cs6rbZqAVEEp9dsYOl4w/U84YydqdsEYyxHNvAtd2bcLz2Eem9L7FI/aGD2ayAybmprtYZLq2AtdXBio.cX0", "John the Ripper"},
{"$4$2tgCi76D$zy7ms.v1Y8HcsasTaR8n/Ng8GH4dhPv4ozihbM4JMNSJUmw7wVKbcqksefn7nVT.WrN18fV8i1yh7Gmq.cXC", "DragonFly BSD"},
{NULL}
};
static struct fmt_tests tests_64[] = {
{"$4$7E48ul$9or6.L/T.iChtPIGY4.vIgdYEmMkTW7Ru4OJxtGJtonCQo.wu3.bS4UPlUc2B8CAfGo1Oi5PgQvfhzNQ.A8v", "magnum"},
{"$4$Hz$Mujq0GrjuRtPhcM/0rOfbr2l9fXGfVwKAuL9oL5IH.RnOO1zcgG/S6rSIrebK4g0BEgKGKc0zmWpnk3O..uR", ""},
{"$4$W$.eHqh7OeyhVkBG0lCuUFnEShQq3tZt1QOLUx/9vIt3p56rUMCu2w7iQof7HwWa1pJwcBpPG.7KK3Pcce.oFX", "password"},
{"$4$dw7uRHW$17b2EzV3m0ziCLQoSKzUElTVgkL7cHXQzZzeeuNnkee/bchs0VHGqzjXrMZtWVfK2OW8.GfHvtZgzqGF.IUZ", "John the Ripper"},
{"$4$2tgCi76D$NL8CBWreQkoaVeGVL/a27ZrwYq6M8mlNt.uqc9E9.OiANu6JHdQy2r6J4uAZuD7wKqAQier1YVL7M0IF.gvi", "DragonFly BSD"},
{NULL}
};
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
static char *cur_salt;
static int salt_len;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos, *start;
if (strncmp(ciphertext, "$4$", 3))
return 0;
ciphertext += 3;
for (pos = ciphertext; *pos && *pos != '$'; pos++);
if (!*pos || pos < ciphertext || pos > &ciphertext[8]) return 0;
start = ++pos;
while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++;
if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0;
return 1;
}
#define TO_BINARY(b1, b2, b3) \
value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
out[b1] = value >> 16; \
out[b2] = value >> 8; \
out[b3] = value;
// Don't copy this code without realising it mimics bugs in the original code!
// We are actually missing the last 16 bits with this implementation.
static void *get_binary(char *ciphertext)
{
static ARCH_WORD_32 outbuf[BINARY_SIZE/4];
ARCH_WORD_32 value;
char *pos;
unsigned char *out = (unsigned char*)outbuf;
int i;
memset(outbuf, 0, sizeof(outbuf));
pos = strrchr(ciphertext, '$') + 1;
for (i = 0; i < 20; i++) {
TO_BINARY(i, i + 21, i + 42);
}
value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] |
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) |
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) |
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18);
out[20] = value >> 16;
out[41] = value >> 8;
return (void *)out;
}
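// Layout produced above (derived from TO_BINARY): group i of four base64
// digits fills bytes out[i], out[i+21] and out[i+42], i.e. the digest is
// interleaved across three 21-byte lanes; the final partial group fills only
// out[20] and out[41], so out[62] and out[63] stay zero -- hence only
// USED_BINARY_SIZE (62) bytes of the SHA-512 digest are ever compared.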
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_key_length[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_key_length[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
static char *get_key(int index)
{
saved_key[index][saved_key_length[index]] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA512_CTX ctx;
SHA512_Init(&ctx);
/* First the password */
SHA512_Update(&ctx, saved_key[index], saved_key_length[index]);
/* Then the salt, including the $4$ magic */
SHA512_Update(&ctx, cur_salt, salt_len);
SHA512_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static void set_salt(void *salt)
{
salt_len = (int)*(char*)salt;
cur_salt = (char*)salt + 1;
}
// For 32-bit version of the bug, our magic is "$4$\0"
static void *get_salt_32(char *ciphertext)
{
static char *out;
int len;
if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE_32);
ciphertext += 3;
strcpy(&out[1], "$4$");
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(&out[5], ciphertext, len);
out[0] = len + 4;
return out;
}
// For 64-bit version of the bug, our magic is "$4$\0/etc"
static void *get_salt_64(char *ciphertext)
{
static char *out;
int len;
if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE_64);
ciphertext += 3;
memcpy(&out[1], "$4$\0/etc", 8);
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(&out[9], ciphertext, len);
out[0] = len + 8;
return out;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], USED_BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], USED_BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
// Public domain hash function by DJ Bernstein
static int salt_hash(void *salt)
{
unsigned char *s = (unsigned char*)salt + 1;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < *(unsigned char*)salt; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_dragonfly4_32 = {
{
FORMAT_LABEL_32,
FORMAT_NAME_32,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
USED_BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE_32,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests_32
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt_32,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_dragonfly4_64 = {
{
FORMAT_LABEL_64,
FORMAT_NAME_64,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE_64,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests_64
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt_64,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
types.h | /* ----------------------------------------------------------------------
miniMD is a simple, parallel molecular dynamics (MD) code. miniMD is
an MD microapplication in the Mantevo project at Sandia National
Laboratories ( http://www.mantevo.org ). The primary
authors of miniMD are Steve Plimpton (sjplimp@sandia.gov) , Paul Crozier
(pscrozi@sandia.gov) and Christian Trott (crtrott@sandia.gov).
Copyright (2008) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This library is free software; you
can redistribute it and/or modify it under the terms of the GNU Lesser
General Public License as published by the Free Software Foundation;
either version 3 of the License, or (at your option) any later
version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA. See also: http://www.gnu.org/licenses/lgpl.txt .
For questions, contact Paul S. Crozier (pscrozi@sandia.gov) or
Christian Trott (crtrott@sandia.gov).
Please read the accompanying README and LICENSE files.
---------------------------------------------------------------------- */
#ifndef TYPES_H
#define TYPES_H
enum ForceStyle {FORCELJ, FORCEEAM};
struct double2 {
double x, y;
};
struct float2 {
float x, y;
};
struct double4 {
double x, y, z, w;
};
struct float4 {
float x, y, z, w;
};
#ifndef CHUNKSIZE
#define CHUNKSIZE 64
#endif
/* A raw '#pragma' cannot appear inside a macro body; emit it via the C99
   _Pragma operator instead (the helper macro below is introduced for this
   fix; OpenMP macro-expands CHUNKSIZE inside the resulting directive). */
#define MMD_PRAGMA(x) _Pragma(#x)
#ifdef NOCHUNK
#define OMPFORSCHEDULE MMD_PRAGMA(omp for schedule(static))
#else
#define OMPFORSCHEDULE MMD_PRAGMA(omp for schedule(static,CHUNKSIZE))
#endif
#ifndef PRECISION
#define PRECISION 2
#endif
#if PRECISION==1
typedef float MMD_float;
typedef float2 MMD_float2;
typedef float4 MMD_float4;
#else
typedef double MMD_float;
typedef double2 MMD_float2;
typedef double4 MMD_float4;
#endif
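/* Usage note (illustrative): precision is fixed at build time, e.g.
 *
 *   cc -DPRECISION=1 ...   => MMD_float is float  (single precision)
 *   cc -DPRECISION=2 ...   => MMD_float is double (the default)
 *
 * so kernels written once against MMD_float/MMD_float4 compile for either
 * width without source changes. */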
typedef int MMD_int;
typedef int MMD_bigint;
#ifndef PAD4
#define PAD 3
#else
#define PAD 4
#endif
#ifdef __INTEL_COMPILER
#ifndef ALIGNMALLOC
#define ALIGNMALLOC 64
#endif
#define RESTRICT __restrict
#endif
#ifndef RESTRICT
#define RESTRICT
#endif
#endif
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
 * \brief Meta information about the dataset; always sits in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 7;
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*!
 * \brief initial margins;
 * if specified, xgboost will start from this initial margin.
 * It can be used to specify an initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief default constructor */
MetaInfo() = default;
MetaInfo& operator=(MetaInfo const& that) {
this->num_row_ = that.num_row_;
this->num_col_ = that.num_col_;
this->num_nonzero_ = that.num_nonzero_;
this->labels_.Resize(that.labels_.Size());
this->labels_.Copy(that.labels_);
this->group_ptr_ = that.group_ptr_;
this->weights_.Resize(that.weights_.Size());
this->weights_.Copy(that.weights_);
this->base_margin_.Resize(that.base_margin_.Size());
this->base_margin_.Copy(that.base_margin_);
return *this;
}
/*!
 * \brief Get the weight of the i-th instance.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
private:
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
/*! \brief The GPU device to use. */
int gpu_id;
/*! \brief Maximum number of bins per feature for histograms. */
int max_bin { 0 };
/*! \brief Number of rows in a GPU batch, used for finding quantiles on GPU. */
int gpu_batch_nrows;
/*! \brief Page size for external memory mode. */
size_t gpu_page_size;
BatchParam() = default;
BatchParam(int32_t device, int32_t max_bin, int32_t gpu_batch_nrows,
size_t gpu_page_size = 0) :
gpu_id{device},
max_bin{max_bin},
gpu_batch_nrows{gpu_batch_nrows},
gpu_page_size{gpu_page_size}
{}
inline bool operator!=(const BatchParam& other) const {
return gpu_id != other.gpu_id ||
max_bin != other.max_bin ||
gpu_batch_nrows != other.gpu_batch_nrows ||
gpu_page_size != other.gpu_page_size;
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid{};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size to zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
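  /*
   * Illustrative note: in this CSR layout, row i is the half-open slice
   * data[offset[i], offset[i+1]).  E.g. a 2x3 matrix with rows
   * {(0,1.0),(2,3.0)} and {(1,2.0)} is stored as offset = {0, 2, 3} and
   * data = {(0,1.0),(2,3.0),(1,2.0)}, so (*this)[1] spans the single
   * Entry (1,2.0).
   */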
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
SparsePage GetTranspose(int num_columns) const;
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. */
~EllpackPage();
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
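/*
 * Minimal PImpl sketch (illustrative; Widget/WidgetImpl are hypothetical
 * names): the header exposes only a forward declaration plus a unique_ptr,
 * so translation units that include it never see CUDA-specific members.
 *
 *   class WidgetImpl;                       // defined in a .cc/.cu file
 *   class Widget {
 *    public:
 *     Widget();
 *     ~Widget();                            // defined out of line, where
 *    private:                               // WidgetImpl is complete
 *     std::unique_ptr<WidgetImpl> impl_;
 *   };
 *
 * EllpackPage above follows the same pattern with EllpackPageImpl.
 */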
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
BatchIterator<T> begin() { return begin_iter_; }
BatchIterator<T> end() { return BatchIterator<T>(nullptr); }
private:
BatchIterator<T> begin_iter_;
};
/*!
 * \brief A data structure that the user can pass to DMatrix::Create
 * to create a DMatrix for training; the user can create this data structure
 * for customized data loading on a single machine.
 *
 * In a distributed setting, a customized dmlc::Parser is usually needed instead.
*/
template<typename T>
class DataSource : public dmlc::DataIter<T> {
public:
/*!
* \brief Meta information about the dataset
* The subclass need to be able to load this correctly from data.
*/
MetaInfo info;
};
/*!
 * \brief Internal data structure used by XGBoost during training.
 * There are two ways to create a customized DMatrix that reads in a user-defined format.
 *
 * - Provide a dmlc::Parser and pass it into DMatrix::Create.
 *   Alternatively, if the data can be represented by a URL, define a new dmlc::Parser and
 *   register it with DMLC_REGISTER_DATA_PARSER;
 *   this works best for user-defined data input sources, such as databases or filesystems.
 * - Provide a DataSource that can be passed to DMatrix::Create.
 *   This can be used to reuse an in-memory data structure as a DMatrix.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns are stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*! \brief Whether the matrix is dense. */
bool IsDense() const {
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return a Created DMatrix.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
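/*
 * Usage sketch (illustrative): the specializations above dispatch a typed
 * GetBatches<T>() call to the matching virtual getter, so callers iterate
 * pages with a range-based for loop, e.g.
 *
 *   for (auto const& page : dmat->GetBatches<SparsePage>()) {
 *     for (size_t i = 0; i < page.Size(); ++i) {
 *       auto inst = page[i];  // common::Span<Entry const> over row i
 *     }
 *   }
 *
 * where dmat is assumed to be a DMatrix* from DMatrix::Load() or Create().
 */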
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
template <typename T>
struct AccType {
using type = T;
};
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
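/*
 * Worked example (illustrative): with shape (2, 3), coordinate (1, 2)
 * ravels to 1*3 + 2 = 5, and unravel(5, (2,3)) recovers (1, 2) by repeated
 * div/mod from the innermost dimension.  The (shape[i] > coord[i]) factor
 * clamps broadcast axes: with shape (1, 3), coordinate (1, 2) still maps
 * to index 2, which is what elementwise broadcasting requires.
 */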
/* Compute dot product of two vectors */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
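/*
 * Illustrative note: calc_stride() zeroes the stride of size-1 axes, so a
 * broadcast input never advances along them.  E.g. for an input of shape
 * (1, 4) walked against an output of shape (3, 4), stride = (0, 1) and the
 * input index produced by inc() cycles 0,1,2,3, 0,1,2,3, ... visiting the
 * same row three times.  The two-index inc() overload advances an output
 * and an input index in lock step, one odometer carry at a time.
 */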
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
  /*! \brief input is a tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
  /*! \brief input is a tensor and two scalar values */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
  /*! \brief input is a single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
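/*
 * Illustrative sketch (the `square` op below is hypothetical): op_with_req
 * binds an elementwise OP to a compile-time request, and KERNEL_ASSIGN
 * selects the store accordingly:
 *
 *   struct square {
 *     template<typename DType>
 *     MSHADOW_XINLINE static DType Map(DType x) { return x * x; }
 *   };
 *
 *   // op_with_req<square, kWriteTo>::Map(i, out, in)  does out[i]  = in[i]*in[i]
 *   // op_with_req<square, kAddTo>::Map(i, out, in)    does out[i] += in[i]*in[i]
 */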
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
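/*
 * Usage sketch (illustrative; `square` is the hypothetical op from the note
 * above, and s/size/out_ptr/in_ptr are assumed to come from the calling
 * operator's context):
 *
 *   MXNET_ASSIGN_REQ_SWITCH(req, Req, {
 *     Kernel<op_with_req<square, Req>, cpu>::Launch(s, size, out_ptr, in_ptr);
 *   });
 *
 * Launch() iterates OP::Map(i, args...) over i in [0, N), splitting the
 * range across OpenMP threads when that is profitable.
 */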
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
if (0 == N) return;
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif // __CUDACC__
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
wow_srp_fmt_plug.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2012. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2012 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*
*
* This implements the SRP protocol, with Blizzard's (battlenet) documented
* implementation specifics.
*
* U = username in upper case
* P = password in upper case
* s = random salt value.
*
* x = SHA1(s . SHA1(U . ":" . P));
* v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719
*
* v is the 'verifier' value (256 bit value).
*
* Added OMP. Added 'default' oSSL BigNum exponentiation.
 * GMP exponentiation (faster) is optional, and controlled with HAVE_LIBGMP in autoconfig.h
 *
 * NOTE, a bug fix was required. The incoming binary may be 64 bytes OR LESS. It
 * can also be 64 bytes (or less), and have left padded 0's. We have to adjust
 * several things to handle this properly. First, valid must handle it. Then
 * binary and salt both must handle this. Also, crypt must handle this. NOTE,
 * the string 'could' be an odd length. If so, then only 1 byte of hex is put
 * into the first binary byte. All of these problems were found once I got
 * jtrts.pl working with wowsrp. There now are 2 input files for wowsrp. One
 * keeps the raw values (i.e. if a number only has 61 bytes of precision,
 * then only 61 bytes will be in the string). The other file left pads the
 * numbers with 0's to an even 64 bytes long, so all are 64 bytes. The format
 * MUST handle both, since at this moment, we are not exactly sure which type
 * will be seen in the wild. NOTE, the byte swapped method (GMP) within is no
 * longer valid, and was removed.
 * NOTE, we need to add split() to canonize this format (remove LPad 0's)
*/
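/*
 * Illustrative sketch (not the plugin's code path; the big numbers base47,
 * modN, v and the BN_CTX bnctx are assumed already initialized): the
 * verifier math above can be reproduced with OpenSSL primitives, mirroring
 * what crypt_all() does below:
 *
 *   unsigned char x[20];
 *   SHA_CTX c;
 *   SHA1_Init(&c);                      // inner hash: SHA1(U . ":" . P)
 *   SHA1_Update(&c, U, strlen(U));      // U, P assumed upper-cased
 *   SHA1_Update(&c, ":", 1);
 *   SHA1_Update(&c, P, strlen(P));
 *   SHA1_Final(x, &c);
 *   SHA1_Init(&c);                      // outer hash: x = SHA1(s . inner)
 *   SHA1_Update(&c, s, slen);
 *   SHA1_Update(&c, x, 20);
 *   SHA1_Final(x, &c);
 *
 *   BIGNUM *e = BN_bin2bn(x, 20, NULL); // v = 47^x % N
 *   BN_mod_exp(v, base47, e, modN, bnctx);
 */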
#if FMT_EXTERNS_H
extern struct fmt_main fmt_blizzard;
#elif FMT_REGISTERS_H
john_register_one(&fmt_blizzard);
#else
#if AC_BUILT
/* we need to know if HAVE_LIBGMP is defined */
#include "autoconfig.h"
#endif
#include <string.h>
#include "sha.h"
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "unicode.h" /* For encoding-aware uppercasing */
#ifdef HAVE_LIBGMP
#if HAVE_GMP_GMP_H
#include <gmp/gmp.h>
#else
#include <gmp.h>
#endif
#define EXP_STR " GMP-exp"
#else
#include <openssl/bn.h>
#define EXP_STR " oSSL-exp"
#endif
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "WoWSRP"
#define FORMAT_NAME "Battlenet"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR EXP_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define WOWSIG "$WoWSRP$"
#define WOWSIGLEN (sizeof(WOWSIG)-1)
// min plaintext len is 8; passwords are alpha-numeric uppercase only
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 4
#define BINARY_ALIGN 4
#define FULL_BINARY_SIZE 32
#define SALT_SIZE (64+3)
#define SALT_ALIGN 1
#define USERNAMELEN 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 4
// salt is in hex (salt and salt2)
static struct fmt_tests tests[] = {
{WOWSIG"6D00CD214C8473C7F4E9DC77AE8FC6B3944298C48C7454E6BB8296952DCFE78D$73616C74", "PASSWORD", {"SOLAR"}},
{WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432", "PASSWORD2", {"DIZ"}},
{WOWSIG"A35DCC134159A34F1D411DA7F38AB064B617D5DBDD9258FE2F23D5AB1CF3F685$73616C7432*DIZ", "PASSWORD2"},
// this one has a leading 0
{"$WoWSRP$01C7F618E4589F3229D764580FDBF0D579D7CB1C071F11C856BDDA9E41946530$36354172646F744A366A7A58386D4D6E*JOHN", "PASSWORD"},
// same hash, but without 0 (only 63 byte hash).
{"$WoWSRP$1C7F618E4589F3229D764580FDBF0D579D7CB1C071F11C856BDDA9E41946530$36354172646F744A366A7A58386D4D6E*JOHN", "PASSWORD"},
{NULL}
};
#ifdef HAVE_LIBGMP
typedef struct t_SRP_CTX {
mpz_t z_mod, z_base, z_exp, z_rop;
} SRP_CTX;
#else
typedef struct t_SRP_CTX {
BIGNUM *z_mod, *z_base, *z_exp, *z_rop;
BN_CTX *BN_ctx;
}SRP_CTX;
#endif
static SRP_CTX *pSRP_CTX;
static unsigned char saved_salt[SALT_SIZE];
static unsigned char user_id[USERNAMELEN];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[8];
static int max_keys_per_crypt;
static void init(struct fmt_main *self)
{
int i;
#if defined (_OPENMP)
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
pSRP_CTX = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*pSRP_CTX));
max_keys_per_crypt = self->params.max_keys_per_crypt;
for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
mpz_init_set_str(pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719", 10);
mpz_init_set_str(pSRP_CTX[i].z_base, "47", 10);
mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10);
mpz_init(pSRP_CTX[i].z_rop);
		// Now, properly initialize z_exp so it is 'large enough' to hold any SHA1 value
// we need to put into it. Then we simply need to copy in the data, and possibly set
// the limb count size.
mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159);
#else
pSRP_CTX[i].z_mod=BN_new();
BN_dec2bn(&pSRP_CTX[i].z_mod, "112624315653284427036559548610503669920632123929604336254260115573677366691719");
pSRP_CTX[i].z_base=BN_new();
BN_set_word(pSRP_CTX[i].z_base, 47);
pSRP_CTX[i].z_exp=BN_new();
pSRP_CTX[i].z_rop=BN_new();
pSRP_CTX[i].BN_ctx = BN_CTX_new();
#endif
}
}
static void done(void)
{
int i;
for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
mpz_clear(pSRP_CTX[i].z_mod);
mpz_clear(pSRP_CTX[i].z_base);
mpz_clear(pSRP_CTX[i].z_exp);
mpz_clear(pSRP_CTX[i].z_rop);
#else
BN_clear_free(pSRP_CTX[i].z_mod);
BN_clear_free(pSRP_CTX[i].z_base);
BN_clear_free(pSRP_CTX[i].z_exp);
BN_clear_free(pSRP_CTX[i].z_rop);
BN_CTX_free(pSRP_CTX[i].BN_ctx);
#endif
}
MEM_FREE(pSRP_CTX);
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
if (strncmp(ciphertext, WOWSIG, WOWSIGLEN))
return 0;
q = p = &ciphertext[WOWSIGLEN];
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
if (q-p > CIPHERTEXT_LENGTH)
return 0;
if (*q != '$')
return 0;
++q;
p = strchr(q, '*');
if (!p)
return 0;
if (((p - q) & 1))
return 0;
if (p - q >= 2 * SALT_SIZE)
return 0;
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
if (q != p)
return 0;
if (strlen(&p[1]) > USERNAMELEN)
return 0;
return 1;
}
/*
 * Copy at most ct2_size bytes to ct2 to avoid a buffer overflow
*/
static void StripZeros(const char *ct, char *ct2, const int ct2_size) {
int i;
for (i = 0; i < WOWSIGLEN && i < (ct2_size - 1); ++i)
*ct2++ = *ct++;
while (*ct == '0')
++ct;
while (*ct && i < (ct2_size - 1)) {
*ct2++ = *ct++;
i++;
}
*ct2 = 0;
}
static char *prepare(char *split_fields[10], struct fmt_main *pFmt) {
// if user name not there, then add it
static char ct[128+32+1];
char *cp;
if (!split_fields[1][0] || strncmp(split_fields[1], WOWSIG, WOWSIGLEN))
return split_fields[1];
cp = strchr(split_fields[1], '*');
if (cp) {
if (split_fields[1][WOWSIGLEN] == '0') {
StripZeros(split_fields[1], ct, sizeof(ct));
return ct;
}
return split_fields[1];
}
if (strnlen(split_fields[1], 129) <= 128) {
strnzcpy(ct, split_fields[1], 128);
cp = &ct[strlen(ct)];
*cp++ = '*';
strnzcpy(cp, split_fields[0], USERNAMELEN);
// upcase user name
enc_strupper(cp);
// Ok, if there are leading 0's for that binary resultant value, then remove them.
if (ct[WOWSIGLEN] == '0') {
char ct2[128+32+1];
StripZeros(ct, ct2, sizeof(ct2));
strcpy(ct, ct2);
}
return ct;
}
return split_fields[1];
}
static char *split(char *ciphertext, int index, struct fmt_main *pFmt) {
static char ct[128+32+1];
char *cp;
strnzcpy(ct, ciphertext, 128+32+1);
cp = strchr(ct, '*');
if (cp) *cp = 0;
strupr(&ct[WOWSIGLEN]);
if (cp) *cp = '*';
if (ct[WOWSIGLEN] == '0') {
char ct2[128+32+1];
StripZeros(ct, ct2, sizeof(ct2));
strcpy(ct, ct2);
}
return ct;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char b[FULL_BINARY_SIZE];
uint32_t dummy[1];
} out;
char *p, *q;
int i;
p = &ciphertext[WOWSIGLEN];
q = strchr(p, '$');
memset(out.b, 0, sizeof(out.b));
while (*p == '0')
++p;
if ((q-p)&1) {
out.b[0] = atoi16[ARCH_INDEX(*p)];
++p;
} else {
out.b[0] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
for (i = 1; i < FULL_BINARY_SIZE; i++) {
out.b[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
if (p >= q)
break;
}
//dump_stuff_msg("binary", out.b, 32);
return out.b;
}
static void *get_salt(char *ciphertext)
{
static union {
unsigned char b[SALT_SIZE];
uint32_t dummy;
} out;
char *p;
int length=0;
memset(out.b, 0, SALT_SIZE);
p = strchr(&ciphertext[WOWSIGLEN], '$') + 1;
// We need to know if this is odd length or not.
while (atoi16[ARCH_INDEX(*p++)] != 0x7f)
length++;
p = strchr(&ciphertext[WOWSIGLEN], '$') + 1;
// handle odd length hex (yes there can be odd length in these SRP files).
if ((length&1)&&atoi16[ARCH_INDEX(*p)] != 0x7f) {
length=0;
out.b[++length] = atoi16[ARCH_INDEX(*p)];
++p;
} else
length = 0;
while (atoi16[ARCH_INDEX(*p)] != 0x7f && atoi16[ARCH_INDEX(p[1])] != 0x7f) {
out.b[++length] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
out.b[0] = length;
if (*p) {
++p;
memcpy(out.b + length+1, p, strlen(p)+1);
}
return out.b;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static int salt_hash(void *salt)
{
unsigned int hash = 0;
char *p = (char *)salt;
while (*p) {
hash <<= 1;
hash += (unsigned char)*p++;
if (hash >> SALT_HASH_LOG) {
hash ^= hash >> SALT_HASH_LOG;
hash &= (SALT_HASH_SIZE - 1);
}
}
hash ^= hash >> SALT_HASH_LOG;
hash &= (SALT_HASH_SIZE - 1);
return hash;
}
static void set_salt(void *salt)
{
unsigned char *cp = (unsigned char*)salt;
memcpy(saved_salt, &cp[1], *cp);
saved_salt[*cp] = 0;
strcpy((char*)user_id, (char*)&cp[*cp+1]);
}
static void set_key(char *key, int index)
{
strnzcpyn(saved_key[index], key, sizeof(*saved_key));
enc_strupper(saved_key[index]);
}
static char *get_key(int index)
{
return saved_key[index];
}
// x = SHA1(s, H(U, ":", P));
// v = 47^x % 112624315653284427036559548610503669920632123929604336254260115573677366691719
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (j = 0; j < count; ++j) {
SHA_CTX ctx;
unsigned char Tmp[20];
memset(crypt_out[j], 0, sizeof(crypt_out[j]));
SHA1_Init(&ctx);
SHA1_Update(&ctx, user_id, strlen((char*)user_id));
SHA1_Update(&ctx, ":", 1);
SHA1_Update(&ctx, saved_key[j], strlen(saved_key[j]));
SHA1_Final(Tmp, &ctx);
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_salt, strlen((char*)saved_salt));
SHA1_Update(&ctx, Tmp, 20);
SHA1_Final(Tmp, &ctx);
		// Ok, now Tmp is x (v = 47^x mod N is computed next)
//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
// printf("salt=%s user=%s pass=%s, ", (char*)saved_salt, (char*)user_id, saved_key[j]);
// dump_stuff_msg("sha$h ", Tmp, 20);
//}
#ifdef HAVE_LIBGMP
{
unsigned char HashStr[80], *p;
int i, todo;
p = HashStr;
for (i = 0; i < 20; ++i) {
*p++ = itoa16[Tmp[i]>>4];
*p++ = itoa16[Tmp[i]&0xF];
}
*p = 0;
mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);
mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );
mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);
p = HashStr;
todo = strlen((char*)p);
if (todo&1) {
((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];
++p;
--todo;
} else {
((unsigned char*)(crypt_out[j]))[0] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
todo -= 2;
}
todo >>= 1;
for (i = 1; i <= todo; i++) {
((unsigned char*)(crypt_out[j]))[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
// dump_stuff_msg("crypt ", crypt_out[j], 32);
//}
}
#else
// using oSSL's BN to do expmod.
pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,20,pSRP_CTX[j].z_exp);
BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);
BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));
//if (!strcmp(saved_key[j], "ENTERNOW__1") && !strcmp((char*)user_id, "DIP")) {
// dump_stuff_msg("crypt ", crypt_out[j], 32);
//}
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int i;
for (i = 0; i < count; ++i) {
if (*((uint32_t*)binary) == *((uint32_t*)(crypt_out[i])))
return 1;
}
return 0;
}
static int cmp_one(void *binary, int index)
{
return *((uint32_t*)binary) == *((uint32_t*)(crypt_out[index]));
}
static int cmp_exact(char *source, int index)
{
return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE);
}
struct fmt_main fmt_blizzard = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
8,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
{ NULL },
{ WOWSIG },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
has160_fmt_plug.c | /* HAS-160 cracker patch for JtR. Hacked together during May, 2015
 * by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * Thanks to RHash, http://www.randombit.net/has160.html and
* https://github.com/maciejczyzewski/retter for the code.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt__HAS160;
#elif FMT_REGISTERS_H
john_register_one(&fmt__HAS160);
#else
#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "has160.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 64
#else
#define OMP_SCALE 2048
#endif // __MIC__
#endif // OMP_SCALE
#include <omp.h>
#endif // _OPENMP
#include "memdbg.h"
#define FORMAT_LABEL "has-160"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "HAS-160 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 40
#define BINARY_SIZE 20
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"307964ef34151d37c8047adec7ab50f4ff89762d", ""},
{"cb5d7efbca2f02e0fb7167cabb123af5795764e5", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"},
{"4872bcbc4cd0f0a9dc7c2f7045e5b43b6c830db8", "a"},
{"975e810488cf2a3d49838478124afce4b1c78804", "abc"},
{"2338dbc8638d31225f73086246ba529f96710bc6", "message digest"},
{"596185c9ab6703d0d0dbb98702bc0f5729cd1d3c", "abcdefghijklmnopqrstuvwxyz"},
{"07f05c8c0773c55ca3a5a695ce6aca4c438911b5", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
{NULL}
};
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[(BINARY_SIZE) / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
q = p;
while (atoi16l[ARCH_INDEX(*q)] != 0x7F)
q++;
return !*q && q - p == CIPHERTEXT_LENGTH;
}
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i;
if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
p = ciphertext;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index)
{
return crypt_out[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_out[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_out[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_out[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_out[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_out[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_out[index][0] & PH_MASK_6;
}
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
saved_key[index][len] = 0;
memcpy(saved_key[index], key, len);
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
has160_ctx ctx;
rhash_has160_init(&ctx);
rhash_has160_update(&ctx, (unsigned char*)saved_key[index], saved_len[index]);
rhash_has160_final(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt__HAS160 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__asin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__asin_fc64_fc64)
// op(A') function: GB (_unop_tran__asin_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = casin (aij)
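// Note: casin is the C99 complex arc sine declared in <complex.h>; for
// double _Complex arguments it returns the principal value, i.e.
// casin(z) = -I * clog (I*z + csqrt (1. - z*z)).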
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = casin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__asin_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = casin (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = casin (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__asin_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bl_dgemm_ref.c |
#include "bl_dgemm.h"
#ifdef USE_BLAS
/*
* dgemm prototype
*
*/
//void dgemm(char*, char*, int*, int*, int*, double*, double*,
// int*, double*, int*, double*, double*, int*);
extern void dgemm_(char*, char*, int*, int*, int*, double*, double*,
int*, double*, int*, double*, double*, int*);
#endif
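/*
 * bl_dgemm_ref computes C += A * B for column-major matrices: A is m x k
 * with leading dimension lda, B is k x n with ldb, and C is m x n with ldc.
 * A minimal usage sketch:
 *
 *   double A[ 4 * 3 ], B[ 3 * 2 ], C[ 4 * 2 ] = { 0.0 };
 *   // ... fill A (4x3) and B (3x2) ...
 *   bl_dgemm_ref( 4, 2, 3, A, 4, B, 3, C, 4 ); // C = A * B
 */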
void bl_dgemm_ref(
int m,
int n,
int k,
double *XA,
int lda,
double *XB,
int ldb,
double *XC,
int ldc
)
{
// Local variables.
int i, j, p;
double alpha = 1.0, beta = 1.0;
// Sanity check for early return.
if ( m == 0 || n == 0 || k == 0 ) return;
// Reference GEMM implementation.
#ifdef USE_BLAS
dgemm_( "N", "N", &m, &n, &k, &alpha,
XA, &lda, XB, &ldb, &beta, XC, &ldc );
#else
#pragma omp parallel for private( i, p )
for ( j = 0; j < n; j ++ ) {
for ( i = 0; i < m; i ++ ) {
for ( p = 0; p < k; p ++ ) {
XC[ j * ldc + i ] += XA[ p * lda + i ] * XB[ j * ldb + p ];
}
}
}
#endif
}
|
courtemanche_ramirez_nattel_1998.c | #include "courtemanche_ramirez_nattel_1998.h"
#include <stdlib.h>
real max_step;
real min_step;
real abstol;
real reltol;
bool adpt;
real *ode_dt, *ode_previous_dt, *ode_time_new;
GET_CELL_MODEL_DATA(init_cell_model_data) {
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
log_to_stdout_and_file("Using courtemanche_ramirez_nattel_1998 CPU model\n");
uint32_t num_cells = solver->original_num_cells;
solver->sv = (real*)malloc(NEQ*num_cells*sizeof(real));
max_step = solver->max_dt;
min_step = solver->min_dt;
abstol = solver->abs_tol;
reltol = solver->rel_tol;
adpt = solver->adaptive;
if(adpt) {
ode_dt = (real*)malloc(num_cells*sizeof(real));
OMP(parallel for)
for(int i = 0; i < num_cells; i++) {
ode_dt[i] = solver->min_dt;
}
ode_previous_dt = (real*)calloc(num_cells, sizeof(real));
ode_time_new = (real*)calloc(num_cells, sizeof(real));
log_to_stdout_and_file("Using Adaptive Euler model to solve the ODEs\n");
} else {
log_to_stdout_and_file("Using Euler model to solve the ODEs\n");
}
OMP(parallel for)
for(uint32_t i = 0; i < num_cells; i++) {
real *sv = &solver->sv[i * NEQ];
sv[0] = -8.118000e+01f; //V millivolt
sv[1] = 2.908000e-03f; //m dimensionless
sv[2] = 9.649000e-01f; //h dimensionless
sv[3] = 9.775000e-01f; //j dimensionless
sv[4] = 3.043000e-02f; //oa dimensionless
sv[5] = 9.992000e-01f; //oi dimensionless
sv[6] = 4.966000e-03f; //ua dimensionless
sv[7] = 9.986000e-01f; //ui dimensionless
sv[8] = 3.296000e-05f; //xr dimensionless
sv[9] = 1.869000e-02f; //xs dimensionless
sv[10] = 1.367000e-04f; //d dimensionless
sv[11] = 9.996000e-01f; //f dimensionless
sv[12] = 7.755000e-01f; //f_Ca dimensionless
sv[13] = 0.0; //u dimensionless
sv[14] = 1.000000e+00f; //v dimensionless
sv[15] = 9.992000e-01f; //w dimensionless
sv[16] = 1.117000e+01f; //Na_i millimolar
sv[17] = 1.390000e+02f; //K_i millimolar
sv[18] = 1.013000e-04f; //Ca_i millimolar
sv[19] = 1.488000e+00f; //Ca_up millimolar
sv[20] = 1.488000e+00f; //Ca_rel millimolar
}
}
SOLVE_MODEL_ODES(solve_model_odes_cpu) {
uint32_t sv_id;
size_t num_cells_to_solve = ode_solver->num_cells_to_solve;
uint32_t * cells_to_solve = ode_solver->cells_to_solve;
real *sv = ode_solver->sv;
real dt = ode_solver->min_dt;
uint32_t num_steps = ode_solver->num_steps;
#pragma omp parallel for private(sv_id)
for (uint32_t i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
if(adpt) {
solve_forward_euler_cpu_adpt(sv + (sv_id * NEQ), stim_currents[i], current_t + dt, sv_id);
}
else {
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = dt*rDY[i] + rY[i];
}
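/*
 * Adaptive forward-Euler step with an embedded Heun (RK2) error estimate:
 * after an Euler step with slope k1, the RHS is re-evaluated at the new
 * point to obtain k2. The difference between the Euler and Heun solutions,
 * (dt/2)*(k1 - k2), is scaled per component by the mixed tolerance
 * max(abstol, reltol*|y|); the step is accepted when the largest scaled
 * error is below 1, and the next step size follows the controller
 * dt_new = beta * dt * sqrt(1/error) with safety factor beta = 0.8.
 */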
void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int sv_id) {
real rDY[NEQ];
real _tolerances_[NEQ];
real _aux_tol = 0.0;
//initializes the variables
real dt = ode_dt[sv_id];
real time_new = ode_time_new[sv_id];
real previous_dt = ode_previous_dt[sv_id];
real edos_old_aux_[NEQ];
real edos_new_euler_[NEQ];
real *_k1__ = (real*) malloc(sizeof(real)*NEQ);
real *_k2__ = (real*) malloc(sizeof(real)*NEQ);
real *_k_aux__;
const real _beta_safety_ = 0.8;
const real __tiny_ = pow(abstol, 2.0f);
if(time_new + dt > final_time) {
dt = final_time - time_new;
}
RHS_cpu(sv, rDY, stim_curr, dt);
time_new += dt;
for(int i = 0; i < NEQ; i++){
_k1__[i] = rDY[i];
}
int count = 0;
int count_limit = (final_time - time_new)/min_step;
int aux_count_limit = count_limit+2000000;
if(aux_count_limit > 0) {
count_limit = aux_count_limit;
}
while(1) {
for(int i = 0; i < NEQ; i++) {
//stores the old variables in a vector
edos_old_aux_[i] = sv[i];
//computes euler method
edos_new_euler_[i] = _k1__[i] * dt + edos_old_aux_[i];
//steps ahead to compute the rk2 method
sv[i] = edos_new_euler_[i];
}
time_new += dt;
RHS_cpu(sv, rDY, stim_curr, dt);
time_new -= dt;//step back
double greatestError = 0.0, auxError = 0.0;
for(int i = 0; i < NEQ; i++) {
// stores the new evaluation
_k2__[i] = rDY[i];
_aux_tol = fabs(edos_new_euler_[i]) * reltol;
_tolerances_[i] = (abstol > _aux_tol) ? abstol : _aux_tol;
// finds the greatest error between the steps
auxError = fabs(((dt / 2.0) * (_k1__[i] - _k2__[i])) / _tolerances_[i]);
greatestError = (auxError > greatestError) ? auxError : greatestError;
}
///adapt the time step
greatestError += __tiny_;
previous_dt = dt;
///adapt the time step
dt = _beta_safety_ * dt * sqrt(1.0f/greatestError);
if (time_new + dt > final_time) {
dt = final_time - time_new;
}
//it doesn't accept the solution
if ( count < count_limit && (greatestError >= 1.0f)) {
//restore the old values to do it again
for(int i = 0; i < NEQ; i++) {
sv[i] = edos_old_aux_[i];
}
count++;
//throw the results away and compute again
} else{//it accepts the solutions
count = 0;
if (dt < min_step) {
dt = min_step;
}
else if (dt > max_step && max_step != 0) {
dt = max_step;
}
if (time_new + dt > final_time) {
dt = final_time - time_new;
}
_k_aux__ = _k2__;
_k2__ = _k1__;
_k1__ = _k_aux__;
//it steps the method ahead, with euler solution
for(int i = 0; i < NEQ; i++){
sv[i] = edos_new_euler_[i];
}
if(time_new + previous_dt >= final_time) {
if((fabs(final_time - time_new) < 1.0e-5)) {
break;
} else if(time_new < final_time) {
dt = previous_dt = final_time - time_new;
time_new += previous_dt;
break;
} else {
dt = previous_dt = min_step;
time_new += (final_time - time_new);
printf("Error: %lf\n", final_time - time_new);
break;
}
} else {
time_new += previous_dt;
}
}
}
ode_dt[sv_id] = dt;
ode_time_new[sv_id] = time_new;
ode_previous_dt[sv_id] = previous_dt;
free(_k1__);
free(_k2__);
}
void RHS_cpu(const real *sv, real *rDY, real stim_current, real dt) {
//State variables
const real V_old_ = sv[0];
const real m_old_ = sv[1];
const real h_old_ = sv[2];
const real j_old_ = sv[3];
const real oa_old_ = sv[4];
const real oi_old_ = sv[5];
const real ua_old_ = sv[6];
const real ui_old_ = sv[7];
const real xr_old_ = sv[8];
const real xs_old_ = sv[9];
const real d_old_ = sv[10];
const real f_old_ = sv[11];
const real f_Ca_old_ = sv[12];
const real u_old_ = sv[13];
const real v_old_ = sv[14];
const real w_old_ = sv[15];
const real Na_i_old_ = sv[16];
const real K_i_old_ = sv[17];
const real Ca_i_old_ = sv[18];
const real Ca_up_old_ = sv[19];
const real Ca_rel_old_ = sv[20];
#include "courtemanche_ramirez_nattel_1998_common.inc.c"
}
|
GxB_BinaryOp_ytype_name.c | //------------------------------------------------------------------------------
// GxB_BinaryOp_ytype_name: return the type_name of y for z=f(x,y)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_BinaryOp_ytype_name // return the name of the type of y
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_BinaryOp binaryop
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_BinaryOp_ytype_name (type_name, op)") ;
GB_RETURN_IF_NULL (type_name) ;
GB_RETURN_IF_NULL_OR_FAULTY (binaryop) ;
ASSERT_BINARYOP_OK (binaryop, "binaryop for ytype_name", GB0) ;
//--------------------------------------------------------------------------
// get the type_name
//--------------------------------------------------------------------------
memcpy (type_name, binaryop->ytype->name, GxB_MAX_NAME_LEN) ;
#pragma omp flush
return (GrB_SUCCESS) ;
}
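// Usage sketch (illustrative, not part of the library):
//
//      char name [GxB_MAX_NAME_LEN] ;
//      GxB_BinaryOp_ytype_name (name, GrB_PLUS_FP64) ;
//      // name now holds "double", the type of y in z = x + y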
|
omp-task.c | /*
* Copyright (C) 2010 Computer Architecture and Parallel Systems Laboratory (CAPSL)
*
* Original author: Elkin E. Garcia
* E-Mail: egarcia@capsl.udel.edu
*
* License
* Redistribution of this code is allowed only after an explicit permission is
* given by the original author or CAPSL and this license should be included in
* all files, either existing or new ones. Modifying the code is allowed, but
* the original author and/or CAPSL must be notified about these modifications.
* The original author and/or CAPSL is also allowed to use these modifications
* and publicly report results that include them. Appropriate acknowledgments
* to everyone who made the modifications will be added in this case.
*
* Warranty
*
* THIS CODE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND,
* EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT
* THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR
* PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN
* ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME
* THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
* OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY
* COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
*/
/*
Static blocked LU Decomposition
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <omp.h>
#include <math.h>
#include <sys/time.h>
//#define DEBUG 1
//#define CHECK 1
#define MEASURE 1
void Print_Matrix (double *v, int M, int N);
void InitMatrix(double *A, int N);
void ProcessDiagonalBlock(double *A, int L1, int N);
void ProcessBlockOnRow(double *A, double *D, int L1, int L3, int N);
void ProcessBlockOnColumn(double *A, double *D, int L1, int L2, int N);
void ProcessInnerBlock(double *A, double *R, double *C, int L1, int L2, int L3, int N);
void stepLU(double *A, int Block, int offset, int N );
void InitMatrix2(double *A, int N);
void InitMatrix3(double *A, int N);
void stage1(double *A, int offset, int *sizedim, int *start, int N, int M);
void stage2(double *A, int offset, int *sizedim, int *start, int N, int M);
void stage3(double *A, int offset, int *sizedim, int *start, int N, int M);
void lu2 (double *A, int N);
unsigned long GetTickCount()
{
struct timeval tv;
gettimeofday(&tv,NULL);
return (tv.tv_sec * 1000000) + (tv.tv_usec);
}
int N = 100;
int Block = 1;
int M=1; //number of blocks per dimension
int main (int argc, char *argv[])
{
double *A,*A2,*L,*U, temp2;
int i,j,k;
int temp=0;
int offset = 0;
double t1,t2;
if( argc > 1 )
N = atoi(argv[1]);
if( argc > 2 )
//Block = atoi(argv[2]);
M = atoi(argv[2]);
A = (double *)malloc (N*N*sizeof(double));
L = (double *)malloc (N*N*sizeof(double));
U = (double *)malloc (N*N*sizeof(double));
if( A==NULL || L==NULL || U==NULL) {
printf("Can't allocate memory\n");
exit(1);
}
#ifdef CHECK
A2 = (double *)malloc (N*N*sizeof(double));
if( A2==NULL ){
printf("Can't allocate memory\n");
exit(1);
}
#endif
/* INITIALIZATION */
//InitMatrix(A,N);
//InitMatrix3(A,N);
#ifdef CHECK
for(i=0; i<N*N; i++) {
A2[i] = A[i]; // Copy of A for verification of correctness
L[i] = 0;
U[i] = 0;
}
#endif
/* /\* LU DECOMPOSITION *\/ */
/* for (k=0;k<N-1;k++){ */
/* for (i=k+1;i<N;i++){ */
/* A[i*N+k] = A[i*N+k]/A[k*N+k]; */
/* /\* for (i=k+1;i<N;i++) *\/ */
/* for (j=k+1;j<N;j++) */
/* A[i*N+j] = A[i*N+j] - A[i*N+k]*A[k*N+j]; */
/* } */
/* } */
int *sizedim;
int *start;
int R; //Remain
int itr = 0;
sizedim = (int*)malloc(M*sizeof(int));
start = (int*)malloc(M*sizeof(int));
R = N;
t1 = GetTickCount();
#pragma omp parallel
{
//printf("The number of thread: %d\n", omp_get_num_threads());
#pragma omp master
{
while (N-offset>M){
// printf(" Iteration: %d\n", itr++);
for (i=0;i<M;i++){
if (i<R%M){
sizedim[i]=R/M+1;
start[i]=(R/M+1)*i;
}
else{
sizedim[i]=R/M;
start[i]=(R/M+1)*(R%M)+(R/M)*(i-R%M);
}
//printf("%i,%i \n",sizedim[i],start[i]);
}
//Print_Matrix(sizedim,1,M);
stage1(A, offset, sizedim, start, N, M);
//Print_Matrix(A,N,N);
stage2(A, offset, sizedim, start, N, M);
//Print_Matrix(A,N,N);
stage3(A, offset, sizedim, start, N, M);
offset+=sizedim[0];
R=R-sizedim[0];
//Print_Matrix(A,N,N);
} //while
} //master
} //omp parallel
ProcessDiagonalBlock(&A[offset*N+offset], N-offset, N);
t2 = GetTickCount();
printf("Time for LU-decomposition in secs: %f \n", (t2-t1)/1000000);
//Print_Matrix(A,N,N);
/* while (N-offset>Block){ */
/* stepLU(A,Block,offset,N); */
/* offset+=Block; */
/* } */
/* ProcessDiagonalBlock(&A[offset*N+offset], N-offset, N); */
//Print_Matrix(A,N,N);
#ifdef CHECK
/* PROOF OF CORRECTNESS */
for (i=0;i<N;i++)
for (j=0;j<N;j++)
if (i>j)
L[i*N+j] = A[i*N+j];
else
U[i*N+j] = A[i*N+j];
for (i=0;i<N;i++)
L[i*N+i] = 1;
//printf("L=\n");
//Print_Matrix(L,N,N);
//printf("U=\n");
//Print_Matrix(U,N,N);
for (i=0;i<N;i++)
for (j=0;j<N;j++){
temp2=0;
for (k=0;k<N;k++)
temp2+=L[i*N+k]*U[k*N+j];
if ((A2[i*N+j]-temp2)/A2[i*N+j] >0.1 || (A2[i*N+j]-temp2)/A2[i*N+j] <-0.1)
temp++;
}
printf("Errors = %d \n", temp);
#endif
return 0;
}
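/*
 * One iteration of the blocked right-looking LU factorization on the
 * trailing (N-offset)x(N-offset) submatrix, partitioned into an MxM grid:
 * stage1 factors the diagonal block sequentially; stage2 updates the
 * block-row and block-column panels, one OpenMP task per block; stage3
 * applies the rank-L1 update to the trailing inner blocks, again one task
 * per block, after stage2's taskwait. The final remainder (N-offset <= M)
 * is factored by the ProcessDiagonalBlock call after the loop.
 */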
void stage1(double *A, int offset, int *sizedim, int *start, int N, int M)
{
// start[0] is always 0, so it is not used.
ProcessDiagonalBlock(&A[offset*N+offset], sizedim[0], N);
}
void stage2(double *A, int offset, int *sizedim, int *start, int N, int M)
{
int x=offset, y=offset;
int B = sizedim[0];
int i;
int L1 = sizedim[0];
int L2, L3;
/* Processing only one big block in column and row */
//ProcessBlockOnRow(&A[x*N+(B+y)], &A[x*N+y], B, N-(B+x), N);
//ProcessBlockOnColumn(&A[(B+x)*N+y], &A[x*N+y], B, N-(B+y), N);
for (i=1;i<M;i++){
L2 = sizedim[i];
L3 = sizedim[i];
#pragma omp task firstprivate(i, L1, L2,x, y, N)
ProcessBlockOnColumn(&A[(x+start[i])*N+y], &A[x*N+y], L1, L2, N);
#pragma omp task firstprivate(i, L1, L2,x, y, N)
ProcessBlockOnRow(&A[x*N+(y+start[i])], &A[x*N+y], L1, L3, N);
}
#pragma omp taskwait
}
void stage3(double *A, int offset, int *sizedim, int *start, int N, int M)
{
int x=offset, y=offset;
int B = sizedim[0];
int i,j;
int L1 = sizedim[0];
int L2, L3;
//ProcessInnerBlock(&A[(B+x)*N+(B+y)], &A[x*N+(B+y)], &A[(B+x)*N+y], B, N-(B+y), N-(B+x), N);
for (i=1;i<M;i++)
for (j=1;j<M;j++){
L2 = sizedim[i];
L3 = sizedim[j];
#pragma omp task firstprivate(i,j,M,N,x,y,L1,L2,L3)
ProcessInnerBlock(&A[(x+start[i])*N+(y+start[j])], &A[x*N+(y+start[j])], &A[(x+start[i])*N+y], L1, L2, L3, N);
}
#pragma omp taskwait
}
void ProcessDiagonalBlock(double *A, int L1, int N)
/* *A is a pointer to the block processed */
/* The size of the diagonal block is L1xL1 */
/* N is the size of the matrix in one dimension */
{
int i,j,k;
for (i=0;i<L1;i++)
for (j=i+1;j<L1;j++){
A[j*N+i]/=A[i*N+i];
/* DAXPY(&A[j*N+(i+1)],&A[i*N+(i+1)] ,-A[j*N+i],L1-(i+1),1); */
for (k=i+1;k<L1;k++)
A[j*N+k] = A[j*N+k] - A[j*N+i]*A[i*N+k];
}
}
void ProcessBlockOnColumn(double *A, double *D, int L1, int L2, int N)
/* The size of the column block is L2xL1 */
{
/* *A is a pointer to the column block processed */
/* *D is a pointer to the diagonal block required */
/* The size of the column block is L2xL1 */
/* The size of the diagonal block is L1xL1 */
int i,j,k;
for (i=0;i<L1;i++)
for (j=0;j<L2;j++){
A[j*N+i]/=D[i*N+i];
/* DAXPY(&A[j*N+(i+1)],&D[i*N+(i+1)],-A[j*N+i],L1-(i+1),1); */
for (k=i+1;k<L1;k++)
A[j*N+k]+=-A[j*N+i]*D[i*N+k];
}
}
void ProcessBlockOnRow(double *A, double *D, int L1, int L3, int N)
/* The size of the row block is L1xL3 */
{
/* *A is a pointer to the row block processed */
/* *D is a pointer to the diagonal block required */
/* The size of the row block is L1xL3 */
/* The size of the diagonal block is L1xL1 */
int i,j,k;
for (i=0;i<L1;i++)
for (j=i+1;j<L1;j++)
/* DAXPY(&A[N*j],&A[N*i],-D[j*N+i],L3,1); */
for (k=0;k<L3;k++)
A[j*N+k]+=-D[j*N+i]*A[i*N+k];
}
void ProcessInnerBlock(double *A, double *R, double *C, int L1, int L2, int L3, int N)
{
/* *A is a pointer to the inner block processed */
/* *R is a pointer to the row block required */
/* *C is a pointer to the column block required */
/* The size of the row block is L1xL3 */
/* The size of the column block is L2xL1 */
/* The size of the inner block is L2xL3 */
int i,j,k;
for (i=0;i<L1;i++)
for (j=0;j<L2;j++)
/* DAXPY(&A[N*j],&R[N*i],-C[j*N+i],L3,1); */
for (k=0;k<L3;k++)
A[j*N+k]+=-C[j*N+i]*R[i*N+k];
}
void Print_Matrix (double *v, int M, int N)
{
int i,j;
printf("\n");
for (i=0;i<M;i++){
for (j=0;j<N;j++)
printf("%.2f,",v[i*N+j]);
printf("\n");
}
printf("\n");
return;
}
/*void InitMatrix(double *A, int N)
{
long long i, j;
struct timeval InitTime;
gettimeofday(&InitTime, NULL);
srand(InitTime.tv_sec * 1000000 + InitTime.tv_usec);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
A[i * N + j] = ( rand() - RAND_MAX/2) / (RAND_MAX/1000 );
if (i == j) {
A[i * N + i] *= 10;
}
//A[i*N+j]=i+j+1;
}
}
}
*/
void InitMatrix2(double *A, int N)
{
long long i, j,k;
for (i=0;i<N*N;i++)
A[i]=0;
for (k=0;k<N;k++)
for (i = k; i < N; i++)
for (j = k; j < N; j++)
A[i * N + j] +=1;
}
void InitMatrix3(double *A, int N)
{
long long i,j,k;
double *L, *U;
L = (double*) malloc(N*N*sizeof(double));
U = (double*) malloc(N*N*sizeof(double));
#pragma omp parallel
{
#pragma omp for private(i,j)
for (i=0;i<N;i++)
for (j=0;j<N;j++){
A[i*N+j]=0;
if (i>=j)
L[i*N+j] = i-j+1;
else
L[i*N+j] = 0;
if (i<=j)
U[i*N+j] = j-i+1;
else
U[i*N+j] = 0;
}
#pragma omp for private(i,j,k)
for (i=0;i<N;i++)
for (j=0;j<N;j++)
for (k=0;k<N;k++)
A[i*N+j]+=L[i*N+k]*U[k*N+j];
}
}
|
GB_binop__isle_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int8)
// A*D function (colscale): GB (_AxD__isle_int8)
// D*A function (rowscale): GB (_DxB__isle_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int8)
// C=scalar+B GB (_bind1st__isle_int8)
// C=scalar+B' GB (_bind1st_tran__isle_int8)
// C=A+scalar GB (_bind2nd__isle_int8)
// C=A'+scalar GB (_bind2nd_tran__isle_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
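// Illustrative sketch (not part of this file): these workers are reached
// through the generic GraphBLAS API; for example
//
//      GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GxB_ISLE_INT8,
//          A, B, NULL) ;
//
// dispatches to one of the GB (_AemultB_*__isle_int8) kernels above when
// A, B, and C are GrB_INT8 matrices.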
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isle_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB074-flush-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This benchmark is extracted from flush_nolist.1c of the OpenMP Application
Programming Interface Examples, Version 4.5.0.
We added one critical section to make it a test with only one pair of data races.
The data race will not generate a wrong result, though, so the assertion always passes.
Data race pair: i@70:10 vs. i@71:11
*/
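/*
A sketch of one possible fix (not applied here, since this benchmark is
meant to keep the race): read i under the same unnamed critical section
that guards the write in f1():
    int tmp;
    #pragma omp critical
    tmp = i;
    sum += tmp;
*/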
#include<stdio.h>
#include<assert.h>
void f1(int *q)
{
#pragma omp critical
*q = 1;
#pragma omp flush
}
int main()
{
int i=0, sum=0;
#pragma omp parallel reduction(+:sum) num_threads(10)
{
f1(&i);
sum+=i;
}
assert (sum==10);
printf("sum=%d\n", sum);
return 0;
}
|
ompGetNumThreads.c | /*
Contributed by pranav@ics.forth.gr
4/14/2010
*/
#include <stdio.h>
// Not including omp.h is essential to reproduce the original bug
//#include <omp.h>
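// Without the prototype from omp.h, C89 rules implicitly declare
// omp_get_num_threads() as an external function returning int; the call
// therefore compiles, and whether it links and behaves as intended depends
// on the compiler and OpenMP runtime in use.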
int main()
{
int k;
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
return 0;
}
|
GB_unaryop__minv_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_uint16
// op(A') function: GB_tran__minv_int8_uint16
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int8_uint16
(
int8_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int8_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__one_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_fc64_fc64
// op(A') function: GB_unop_tran__one_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: ;
// unaryop: cij = GxB_CMPLX(1,0)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GxB_CMPLX(1,0) ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__one_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__one_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lock.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
int main()
{
// need to use an OpenMP construct so that OMPT will be initialized
#pragma omp parallel num_threads(1)
print_ids(0);
omp_lock_t lock;
printf("%" PRIu64 ": &lock: %" PRIu64 "\n", ompt_get_thread_data()->value, (uint64_t) &lock);
omp_init_lock(&lock);
print_fuzzy_address(1);
omp_set_lock(&lock);
print_fuzzy_address(2);
omp_unset_lock(&lock);
print_fuzzy_address(3);
omp_destroy_lock(&lock);
print_fuzzy_address(4);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: &lock: [[WAIT_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_init_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_lock: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
oyranos_cmm_oyra_image_channel.c | /** @file oyranos_cmm_oyra_image_channel.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2016-2018 (C) Kai-Uwe Behrmann
*
* @brief Channel selection module for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2016/04/04
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_db.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h> /* UINT32_MAX */
#endif
#include <locale.h>
/* OY_IMAGE_CHANNEL_REGISTRATION */
/* OY_IMAGE_CHANNEL_REGISTRATION ----------------------------------------------*/
/** @brief implement oyCMMFilter_GetNext_f()
*
 * The "channel" option is built of channel fields. It contains the
 * output section in one text string each in squared brackets: "[a|b|c]".
 * Each channel is separated by a pipe sign '|' and can contain the channel
 * symbol or a fill value. -1 indicates the module shall select an
 * appropriate fill value. The counting of channels starts from a and ends
 * with z, covering the range of ASCII a-z. A special case is a "" no-op
 * signature. Use it for pass-through.
 *
 * With the above syntax it is possible to add or remove channels or simply
 * switch channels off.
 *
 * Switch the second and third channels off: ["a", -1, -1]
 *
 * Swap the first with the third channel: ["c", "b", "a"]
 *
 * Duplicate the second channel and skip the first and possibly the c and
 * further source channels: ["b", "b"]
*
* Note: changing the channel count might require a new ICC profile for the
* output image. Please setup the graph accordingly.
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2016/04/04 (Oyranos: 0.9.6)
*/
int oyraFilter_ImageChannelRun ( oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket )
{
int result = 0, error = 0;
oyFilterSocket_s * socket;
oyFilterNode_s * input_node = 0,
* node;
oyFilterPlug_s * plug = NULL;
oyImage_s * image;
int dirty = 0;
socket = oyFilterPlug_GetSocket( requestor_plug );
node = oyFilterSocket_GetNode( socket );
image = (oyImage_s*)oyFilterSocket_GetData( socket );
if(!image)
{
result = 1;
goto oyraFilter_ImageChannelRun_clean;
}
if(oy_debug)
oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"image [%d](%d)\n",OY_DBG_ARGS_,oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );
{
const char * channels_json;
oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );
oyjl_val json = 0;
char * t;
if(!node_opts)
dirty = 1;
if(dirty)
{
result = dirty;
goto oyraFilter_ImageChannelRun_clean2;
}
plug = oyFilterNode_GetPlug( node, 0 );
/* select node */
input_node = oyFilterNode_GetPlugNode( node, 0 );
/* find filters own channel factor */
channels_json = oyOptions_FindString( node_opts,
"//" OY_TYPE_STD "/channel/channel",
0 );
oyOptions_Release( &node_opts );
error = !channels_json;
if(error) {WARNc_S("found no \"channel\" option for filter");}
else if(oy_debug)
oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"channels_json: \"%s\"",OY_DBG_ARGS_, channels_json);
if(!error && strlen(channels_json))
{
char * save_locale = 0;
/* sensible parsing */
save_locale = oyStringCopy_( setlocale( LC_NUMERIC, 0 ),
oyAllocateFunc_ );
setlocale( LC_NUMERIC, "C" );
t = oyAllocateFunc_(256);
json = oyjlTreeParse( channels_json, t, 256 );
if(t[0])
{
WARNc2_S( "channel option: %s: %s\n", _("found issues parsing JSON"), t );
error = 1;
}
oyFree_m_(t);
setlocale(LC_NUMERIC, save_locale);
if(save_locale)
oyFree_m_( save_locale );
}
if(oy_debug > 2)
oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
"%s",OY_DBG_ARGS_, oyPixelAccess_Show(ticket));
if(channels_json && strlen(channels_json) > 2)
{
oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
oyArray2d_s * a_dest = oyPixelAccess_GetArray( ticket );
int layout_src = oyImage_GetPixelLayout( image, oyLAYOUT );
int layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
int channels_src = oyToChannels_m( layout_src );
int channels_dst = oyToChannels_m( layout_dst );
int ticket_array_pix_width;
int count = oyjlValueCount( json ), i;
const int max_channels = 'z'-'a'+1;
double channel[max_channels+1];
int channel_pos[max_channels+1];
/* avoid division by zero */
if(!channels_src) channels_src = 1;
if(!channels_dst) channels_dst = 1;
ticket_array_pix_width = oyArray2d_GetWidth( a_dest ) / channels_dst;
memset( channel, 0, sizeof(double) * (max_channels+1) );
memset( channel_pos, 0, sizeof(int) * (max_channels+1) );
if(count > channels_dst)
{
WARNc3_S( "\"channel=%s\" option channel count %d exceeds destination image %d", channels_json, count, channels_dst );
error = 1;
}
/* parse the "channel" option as JSON string */
if(!error)
for(i = 0; i < count && !error; ++i)
{
oyjl_val v = oyjlValuePosGet( json, i );
if( OYJL_IS_NUMBER(v) ||
OYJL_IS_DOUBLE(v) )
{
channel[i] = OYJL_GET_DOUBLE( v );
if(channel[i] == -1)
channel[i] = 0.5;
channel_pos[i] = -1;
} else if( OYJL_IS_STRING( v ) )
{
const char * p = OYJL_GET_STRING( v );
channel_pos[i] = p[0] - 'a';
if(channel_pos[i] >= channels_src)
{
WARNc2_S( "channel position %d not available in source image %d", channel_pos[i], channels_src );
error = 1;
}
}
}
oyjlTreeFree( json );
if(!error)
{
int w,h,x,y, start_x,start_y, max_value = -1;
oyRectangle_s * ticket_roi = oyPixelAccess_GetArrayROI( ticket );
oyRectangle_s_ roi_= {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
oyRectangle_s * roi = (oyRectangle_s*)&roi_;
oyArray2d_s * array_out;
uint8_t ** array_out_data;
/* get pixel layout infos for copying */
oyDATATYPE_e data_type_out = oyToDataType_m( layout_dst );
int bps_out = oyDataTypeGetSize( data_type_out );
/* get the source pixels */
result = oyFilterNode_Run( input_node, plug, ticket );
/* get the channel buffers */
array_out = oyPixelAccess_GetArray( ticket );
array_out_data = oyArray2d_GetData( array_out );
w = oyArray2d_GetWidth( array_out ) / channels_dst;
h = oyArray2d_GetHeight( array_out );
switch(data_type_out)
{
case oyUINT8:
max_value = 255;
break;
case oyUINT16:
max_value = 65535;
break;
case oyUINT32:
max_value = UINT32_MAX;
break;
case oyHALF:
case oyFLOAT:
case oyDOUBLE:
max_value = 1.0;
break;
}
oyRectangle_SetByRectangle( roi, ticket_roi );
oyRectangle_Scale( roi, ticket_array_pix_width );
start_x = OY_ROUND(roi_.x);
start_y = OY_ROUND(roi_.y);
/* copy the channels */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,y,i)
#endif
for(y = start_y; y < h; ++y)
{
for(x = start_x; x < w; ++x)
{
union u8421 { uint32_t u4; uint16_t u2; uint8_t u1; float f; double d; };
union u8421 cache[max_channels];
float flt;
uint32_t u4;
/* fill the intermediate pixel cache;
* It is not known which channels are needed and in which order.
* Thus all channels are stored outside the main buffer.
*/
for(i = 0; i < count; ++i)
{
int pos = (channel_pos[i] == -1) ? i : channel_pos[i];
switch(data_type_out)
{
case oyUINT8:
cache[i].u1 = (channel_pos[i] == -1) ? OY_ROUND(channel[i] * max_value) : array_out_data[y][x*channels_dst*bps_out + pos*bps_out];
break;
case oyUINT16:
cache[i].u2 = (channel_pos[i] == -1) ? OY_ROUND(channel[i] * max_value) : *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyUINT32:
cache[i].u4 = (channel_pos[i] == -1) ? (uint32_t) OY_ROUND(channel[i] * max_value) : *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyHALF:
flt = channel[i] * max_value;
memcpy( &u4, &flt, 4 );
cache[i].u2 = (channel_pos[i] == -1) ? OY_FLOAT2HALF(u4) : *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyFLOAT:
cache[i].f = (channel_pos[i] == -1) ? channel[i] * max_value : *((float*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
case oyDOUBLE:
cache[i].d = (channel_pos[i] == -1) ? channel[i] * max_value : *((double*)&array_out_data[y][x*channels_dst*bps_out + pos*bps_out]);
break;
}
}
/* read back all scattered channels */
for(i = 0; i < count; ++i)
{
int pos = i;
switch(data_type_out)
{
case oyUINT8:
array_out_data[y][x*channels_dst*bps_out + i*bps_out] = cache[pos].u1;
break;
case oyUINT16:
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].u2;
break;
case oyUINT32:
*((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].u4;
break;
case oyHALF:
*((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].u2;
break;
case oyFLOAT:
*((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].f;
break;
case oyDOUBLE:
*((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = cache[pos].d;
break;
}
}
}
}
oyArray2d_Release( &array_out );
}
oyImage_Release( &output_image );
} else /* nothing to do */
result = oyFilterNode_Run( input_node, plug, ticket );
oyraFilter_ImageChannelRun_clean2:
oyFilterPlug_Release( &plug );
oyFilterNode_Release( &input_node );
}
oyraFilter_ImageChannelRun_clean:
oyImage_Release( &image );
oyFilterSocket_Release( &socket );
oyFilterNode_Release( &node );
return result;
}
#define OY_IMAGE_CHANNEL_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "channel"
/** @brief oyra oyCMMapi7_s implementation
*
* a filter providing a channel image filter
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2016/04/04 (Oyranos: 0.9.6)
*/
oyCMMapi_s * oyraApi7ImageChannelCreate(void)
{
oyCMMapi7_s * channel7;
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32,
oyHALF, oyFLOAT, oyDOUBLE, 0};
oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
* socket = oyConnectorImaging_New(0);
static oyConnectorImaging_s * plugs[2] = {0,0},
* sockets[2] = {0,0};
plugs[0] = plug;
sockets[0] = socket;
oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( plug, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );
oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
oy_image_connector_texts );
oyConnectorImaging_SetIsPlug( socket, 0 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );
channel7 = oyCMMapi7_Create ( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_CHANNEL_REGISTRATION,
cmm_version, module_api,
NULL,
oyraFilter_ImageChannelRun,
(oyConnector_s**)plugs, 1, 0,
(oyConnector_s**)sockets, 1, 0,
0, 0 );
return (oyCMMapi_s*) channel7;
}
const char * oyraApi4UiImageChannelGetText (
const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select,"name") == 0)
{
if(type == oyNAME_NICK)
return "image_channel";
else if(type == oyNAME_NAME)
return _("Image[channel]");
else if(type == oyNAME_DESCRIPTION)
return _("Channel Image Filter Object");
} else if(strcmp(select,"help") == 0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("The filter is used to reduce pixels.");
else if(type == oyNAME_DESCRIPTION)
{
static char * help_desc = NULL;
if(!help_desc)
oyStringAddPrintf( &help_desc, 0,0, "%s\n",
_("The filter will expect a \"channel\" double option and will create, fill and process a according data version with a new job ticket. The new job tickets image, array and output_array_roi will be divided by the supplied \"channel\" factor. It's plug will request the divided image sizes from the source socket.") );
return help_desc;
}
} else if(strcmp(select,"category") == 0)
{
if(type == oyNAME_NICK)
return "category";
else if(type == oyNAME_NAME)
return _("Image/Simple Image[channel]");
else if(type == oyNAME_DESCRIPTION)
return _("The filter is used to reduce pixels.");
}
return 0;
}
/** @brief oyra oyCMMapi4_s implementation
*
* a filter providing a channel image filter
*
* @version Oyranos: 0.9.6
* @date 2016/04/04
* @since 2016/04/04 (Oyranos: 0.9.6)
*/
oyCMMapi_s * oyraApi4ImageChannelCreate(void)
{
static const char * oyra_api4_ui_image_channel_texts[] = {"name", "help", "category", 0};
oyCMMui_s * ui = oyCMMui_Create( "Image/Simple Image[channel]", /* category */
oyraApi4UiImageChannelGetText,
oyra_api4_ui_image_channel_texts, 0 );
int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
oyCMMapi4_s * channel4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
OY_IMAGE_CHANNEL_REGISTRATION,
cmm_version, module_api,
NULL,
NULL,
NULL,
ui,
NULL );
return (oyCMMapi_s*)channel4;
}
/* OY_IMAGE_CHANNEL_REGISTRATION ----------------------------------------------*/
/* ---------------------------------------------------------------------------*/
|
convolution_1x1_pack1to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack1to4_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
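// Note (editor): a 1x1 stride-1 convolution reads every input pixel
// exactly once, so the spatial dimensions can be flattened into a
// single 1 x (w*h) row and the whole convolution dispatched as one
// sgemm. ncnn's Mat copy is a shallow, reference-counted view, so
// setting bottom_im2col.w/h above reshapes without copying pixels.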
static void conv1x1s2_sgemm_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
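// After emitting outw outputs the read pointer has advanced 2*outw
// within a row of width w; tailstep = (w - 2*outw) finishes that row
// and the extra + w skips the next row entirely (vertical stride 2).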
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack1to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_binop__first_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_fp32
// A.*B function (eWiseMult): GB_AemultB__first_fp32
// A*D function (colscale): GB_AxD__first_fp32
// D*A function (rowscale): GB_DxB__first_fp32
// C+=B function (dense accum): GB_Cdense_accumB__first_fp32
// C+=b function (dense accum): GB_Cdense_accumb__first_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_fp32
// C=scalar+B GB_bind1st__first_fp32
// C=scalar+B' GB_bind1st_tran__first_fp32
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = aij
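// Editor's note: FIRST returns its left operand, so for eWiseAdd the
// result takes aij wherever A has an entry; positions present only in
// B fall back to bij (the operator is applied only on the intersection).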
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_FP32 || GxB_NO_FIRST_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__first_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__first_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__first_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__first_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__first_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__first_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__first_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__first_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
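// Editor's note: since FIRST(x,bij) == x, the loop above ignores Bx
// entirely and simply broadcasts the bound scalar x into every entry
// of Cx; the empty ";" statements are artifacts of the code generator.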
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB_bind1st_tran__first_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
openmp_hello.c | /******************************************************************************
// https://computing.llnl.gov/tutorials/openMP/samples/C/omp_hello.c
* FILE: omp_hello.c
* DESCRIPTION:
* OpenMP Example - Hello World - C/C++ Version
* In this simple example, the master thread forks a parallel region.
* All threads in the team obtain their unique thread number and print it.
* The master thread only prints the total number of threads. Two OpenMP
* library routines are used to obtain the number of threads and each
* thread's number.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (void)
{
int nthreads, tid;
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel private(nthreads, tid)
{
/* Obtain thread number */
tid = omp_get_thread_num();
printf("Hello World from thread = %d\n", tid);
/* Only master thread does this */
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
} /* All threads join master thread and disband */
return 0;
}
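/* Build and run (editor's sketch; assumes GCC or Clang with OpenMP support):
 *   gcc -fopenmp omp_hello.c -o omp_hello
 *   OMP_NUM_THREADS=4 ./omp_hello
 * The "Hello World" lines may appear in any order; only thread 0
 * prints the thread count.
 */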
|
eval_template.c | // ${_warning_in_the_generated_file_not_to_edit}
//#include <math.h>
#include "power.c"
//#include <stdio.h>
%if SIZE_T == 'size_t':
#include <stdlib.h> /* size_t */
%endif
#include "newton_interval.h"
#define BREAKEVEN 100 // TODO: determine a typical value for this
// we only have integer exponents in pow, use a specialized version:
#define pow(arg1, arg2) power(arg1, arg2)
%for token in tokens:
%for wy in range(1, max_wy+1):
%for i in range(max_deriv[wy]+1):
double ${token}_scalar_${wy}_${i}(
const double x, const double * const restrict c, const ${SIZE_T} offset)
{
% for cse_token, cse_def in eval_cse[token][wy][i]:
const double ${cse_token} = ${cse_def};
% endfor
return ${eval_expr[token][wy][i][0]};
}
void ${token}_eval_${wy}_${i}(
const ${SIZE_T} nx,
const double * const restrict x,
const double * const restrict c,
const ${SIZE_T} nout,
const double * const restrict xout,
double * const restrict yout
)
{
// derivative = 0 evaluates the function value, 1 the first
// derivative, and so on.
${SIZE_T} xi = nx; // max: nx-1, nx considered "uninitialized"
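// Editor's note: xi is a per-thread warm-start guess. firstprivate
// gives each OpenMP thread its own copy initialized to the sentinel
// nx, so a thread's first lookup runs the full get_interval search
// and later lookups reuse the previous interval via
// get_interval_from_guess.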
#pragma omp parallel for firstprivate(xi) schedule(static) if (nout > BREAKEVEN)
for (${SIZE_T} oi=0; oi<nout; ++oi){
// Set xi
if (xi == nx){ // xi == nx considered uninitialized!
xi = get_interval(x, nx, xout[oi]);
if (xi == -1)
xi = 0;
}
else{
xi = get_interval_from_guess(x, nx, xout[oi], xi);
if (xi == -1)
xi = 0;
}
// Calculate value of yout[oi] at xout[oi]
// for shifted coefficients.
yout[oi] = ${token}_scalar_${wy}_${i}(xout[oi]-x[xi], c, xi*${wy}*2);
}
}
%endfor
%endfor
%endfor
#undef pow
|
bml_norm_dense_typed.c | #ifdef BML_USE_MAGMA
#include "magma_v2.h"
#include "../bml_allocate.h"
#endif
#include "../../macros.h"
#include "../../typed.h"
#include "../blas.h"
#include "../bml_norm.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_norm_dense.h"
#include "bml_types_dense.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Calculate the sum of squares of all the elements of a matrix.
*
* \ingroup norm_group
*
* \param A The matrix
* \return The sum of squares of A
*/
double TYPED_FUNC(
bml_sum_squares_dense) (
bml_matrix_dense_t * A)
{
int N = A->N;
REAL_T sum = 0.0;
#ifdef BML_USE_MAGMA
#if defined(SINGLE_COMPLEX) || defined(DOUBLE_COMPLEX)
MAGMA_T tsum = MAGMACOMPLEX(MAKE) (0., 0.);
for (int i = 0; i < N; i++)
{
tsum =
MAGMACOMPLEX(ADD) (tsum,
MAGMA(dotu) (N,
(MAGMA_T *) A->matrix + i * A->ld,
1,
(MAGMA_T *) A->matrix + i * A->ld,
1, A->queue));
}
sum = MAGMACOMPLEX(REAL) (tsum) + I * MAGMACOMPLEX(IMAG) (tsum);
#else
for (int i = 0; i < N; i++)
{
sum += MAGMA(dot) (N, (MAGMA_T *) A->matrix + i * A->ld, 1,
(MAGMA_T *) A->matrix + i * A->ld, 1, A->queue);
}
#endif
#else
REAL_T *A_matrix = A->matrix;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
int myRank = bml_getMyRank();
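// Only the rows owned by this rank, [localRowMin, localRowMax), are
// summed here; in distributed mode the caller (e.g. bml_fnorm_dense)
// reduces the partial sums across ranks.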
#pragma omp parallel for \
shared(N, A_matrix) \
shared(A_localRowMin, A_localRowMax, myRank) \
reduction(+:sum)
for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
i++)
{
sum += A_matrix[i] * A_matrix[i];
}
#endif
return (double) REAL_PART(sum);
}
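/* Editor's note: this returns sum_ij |a_ij|^2, so the Frobenius norm
 * computed in bml_fnorm_dense below is simply the square root of this
 * value (after an MPI reduction in the distributed case).
 */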
/** Calculate the sum of squares of all the core elements of a submatrix.
*
* \ingroup norm_group
*
* \param A The matrix
* \param core_size Number of core rows
* \return The sum of squares of A
*/
double TYPED_FUNC(
bml_sum_squares_submatrix_dense) (
bml_matrix_dense_t * A,
int core_size)
{
int N = A->N;
REAL_T sum = 0.0;
#ifdef BML_USE_MAGMA
#if defined(SINGLE_COMPLEX) || defined(DOUBLE_COMPLEX)
MAGMA_T tsum = MAGMACOMPLEX(MAKE) (0., 0.);
for (int i = 0; i < core_size; i++)
{
tsum =
MAGMACOMPLEX(ADD) (tsum,
MAGMA(dotu) (N,
(MAGMA_T *) A->matrix + i * A->ld,
1,
(MAGMA_T *) A->matrix + i * A->ld,
1, A->queue));
}
sum = MAGMACOMPLEX(REAL) (tsum) + I * MAGMACOMPLEX(IMAG) (tsum);
#else
for (int i = 0; i < core_size; i++)
{
sum += MAGMA(dot) (N, (MAGMA_T *) A->matrix + i * A->ld, 1,
(MAGMA_T *) A->matrix + i * A->ld, 1, A->queue);
}
#endif
#else
REAL_T *A_matrix = A->matrix;
#pragma omp parallel for \
shared(N, A_matrix) \
reduction(+:sum)
for (int i = 0; i < core_size * N; i++)
{
sum += A_matrix[i] * A_matrix[i];
}
/*
for (int i = 0; i < core_size; i++)
{
//for (int j = 0; j < core_size; j++)
for (int j = 0; j < N; j++)
{
REAL_T value = A_matrix[ROWMAJOR(i, j, N, N)];
sum += value * value;
}
}
*/
#endif
return (double) REAL_PART(sum);
}
/** Calculate the sum of all elements of \alpha A(i,j) * B(i,j).
*
* \ingroup norm_group
*
* \param A The matrix A
* \param B The matrix B
* \param alpha Multiplier for A
* \param threshold Threshold
* \return The sum of all elements of \alpha A(i,j) * B(i,j)
*/
double TYPED_FUNC(
bml_sum_AB_dense) (
bml_matrix_dense_t * A,
bml_matrix_dense_t * B,
double alpha,
double threshold)
{
int N = A->N;
REAL_T sum = 0.0;
#ifdef BML_USE_MAGMA //do work on CPU for now...
MAGMA_T *A_matrix = bml_allocate_memory(sizeof(MAGMA_T) * A->N * A->N);
MAGMA(getmatrix) (A->N, A->N, A->matrix, A->ld, A_matrix, A->N, A->queue);
MAGMA_T *B_matrix = bml_allocate_memory(sizeof(MAGMA_T) * B->N * B->N);
MAGMA(getmatrix) (B->N, B->N, B->matrix, B->ld, B_matrix, B->N, B->queue);
#else
REAL_T *A_matrix = A->matrix;
REAL_T *B_matrix = B->matrix;
#endif
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
#ifdef BML_USE_MAGMA
MAGMA_T alpha_ = MAGMACOMPLEX(MAKE) (alpha, 0.);
#else
REAL_T alpha_ = (REAL_T) alpha;
#endif
int myRank = bml_getMyRank();
#pragma omp parallel for \
shared(alpha_) \
shared(N, A_matrix, B_matrix) \
shared(A_localRowMin, A_localRowMax, myRank) \
reduction(+:sum)
//for (int i = 0; i < N * N; i++)
for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
i++)
{
#ifdef BML_USE_MAGMA
MAGMA_T ttemp =
MAGMACOMPLEX(MUL) (MAGMACOMPLEX(MUL) (alpha_, A_matrix[i]),
B_matrix[i]);
REAL_T temp =
MAGMACOMPLEX(REAL) (ttemp) + I * MAGMACOMPLEX(IMAG) (ttemp);
#else
REAL_T temp = alpha_ * A_matrix[i] * B_matrix[i];
#endif
if (ABS(temp) > threshold)
sum += temp; //* temp;
}
#ifdef BML_USE_MAGMA
free(A_matrix);
free(B_matrix);
#endif
return (double) REAL_PART(sum);
}
/** Calculate the sum of squares of all elements of \alpha A + \beta B.
*
* \ingroup norm_group
*
* \param A The matrix A
* \param B The matrix B
* \param alpha Multiplier for A
* \param beta Multiplier for B
* \param threshold Threshold
* \return The sum of squares of all elements of \alpha A + \beta B
*/
double TYPED_FUNC(
bml_sum_squares2_dense) (
bml_matrix_dense_t * A,
bml_matrix_dense_t * B,
double alpha,
double beta,
double threshold)
{
int N = A->N;
REAL_T sum = 0.0;
#ifdef BML_USE_MAGMA //do work on CPU for now...
MAGMA_T *A_matrix = bml_allocate_memory(sizeof(MAGMA_T) * A->N * A->N);
MAGMA(getmatrix) (A->N, A->N, A->matrix, A->ld, A_matrix, A->N, A->queue);
MAGMA_T *B_matrix = bml_allocate_memory(sizeof(MAGMA_T) * B->N * B->N);
MAGMA(getmatrix) (B->N, B->N, B->matrix, B->ld, B_matrix, B->N, B->queue);
#else
REAL_T *A_matrix = A->matrix;
REAL_T *B_matrix = B->matrix;
#endif
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
#ifdef BML_USE_MAGMA
MAGMA_T alpha_ = MAGMACOMPLEX(MAKE) (alpha, 0.);
MAGMA_T beta_ = MAGMACOMPLEX(MAKE) (beta, 0.);
#else
REAL_T alpha_ = (REAL_T) alpha;
REAL_T beta_ = (REAL_T) beta;
#endif
int myRank = bml_getMyRank();
#pragma omp parallel for \
shared(alpha_, beta_) \
shared(N, A_matrix, B_matrix) \
shared(A_localRowMin, A_localRowMax, myRank) \
reduction(+:sum)
//for (int i = 0; i < N * N; i++)
for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
i++)
{
#ifdef BML_USE_MAGMA
MAGMA_T ttemp =
MAGMACOMPLEX(ADD) (MAGMACOMPLEX(MUL) (alpha_, A_matrix[i]),
MAGMACOMPLEX(MUL) (beta_, B_matrix[i]));
REAL_T temp =
MAGMACOMPLEX(REAL) (ttemp) + I * MAGMACOMPLEX(IMAG) (ttemp);
#else
REAL_T temp = alpha_ * A_matrix[i] + beta_ * B_matrix[i];
#endif
if (ABS(temp) > threshold)
sum += temp * temp;
}
#ifdef BML_USE_MAGMA
free(A_matrix);
free(B_matrix);
#endif
return (double) REAL_PART(sum);
}
/** Calculate the Frobenius norm of a matrix.
*
* \ingroup norm_group
*
* \param A The matrix
* \return The Frobenius norm of A
*/
double TYPED_FUNC(
bml_fnorm_dense) (
bml_matrix_dense_t * A)
{
double sum = 0.0;
//#ifdef BML_USE_MAGMA
// int lwork=A->N;
// MAGMA_T *dwork;
// MAGMA(malloc)(&dwork,lwork);
// sum = MAGMABLAS(lange)(MagmaFrobeniusNorm, A->N, A->N, A->matrix, A->ld,
// dwork, lwork, A->queue);
//#else
sum = TYPED_FUNC(bml_sum_squares_dense) (A);
#ifdef DO_MPI
if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
{
bml_sumRealReduce(&sum);
}
#endif
//#endif
return (double) REAL_PART(sqrt(sum));
}
/** Calculate the Frobenius norm of the difference of 2 matrices.
*
* \ingroup norm_group
*
* \param A The matrix A
* \param B The matrix B
* \return The Frobenius norm of A-B
*/
double TYPED_FUNC(
bml_fnorm2_dense) (
bml_matrix_dense_t * A,
bml_matrix_dense_t * B)
{
int N = A->N;
REAL_T *A_matrix = (REAL_T *) A->matrix;
REAL_T *B_matrix = (REAL_T *) B->matrix;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
double fnorm = 0.0;
REAL_T temp;
int myRank = bml_getMyRank();
// temp must be private: sharing it across threads is a data race
#pragma omp parallel for \
private(temp) \
shared(N, A_matrix, B_matrix) \
shared(A_localRowMin, A_localRowMax, myRank) \
reduction(+:fnorm)
//for (int i = 0; i < N*N;i++)
for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
i++)
{
temp = A_matrix[i] - B_matrix[i];
fnorm += temp * temp;
}
#ifdef DO_MPI
if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
{
bml_sumRealReduce(&fnorm);
}
#endif
return (double) REAL_PART(sqrt(fnorm));
}
|
patchmatch_stereo_impl.h | /*
* Copyright (C) 2020, unclearness
* All rights reserved.
*
*
* Implementation of the following paper
*
* Bleyer, Michael, Christoph Rhemann, and Carsten Rother. "PatchMatch
* Stereo-Stereo Matching with Slanted Support Windows." Bmvc. Vol. 11. 2011.
*/
#include <functional>
#include <random>
#include "ugu//util/math_util.h"
#include "ugu/stereo/base.h"
#include "ugu/timer.h"
#include "ugu/util/image_util.h"
namespace {
inline void RecoverNormalFromPlane(const ugu::Vec3f& p, ugu::Vec3f* n) {
(*n)[2] = -1.0f / std::sqrt((p[0] * p[0] + p[1] * p[1] + 1.0f));
(*n)[0] = -p[0] * (*n)[2];
(*n)[1] = -p[1] * (*n)[2];
}
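// Derivation (editor's note): a disparity plane is stored as
// p = (a, b, c) with d(x, y) = a*x + b*y + c. The unit normal with
// camera-facing (negative) z is n = (a, b, -1) / sqrt(a^2 + b^2 + 1),
// which is exactly what the three assignments above compute.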
inline bool ValidatePlane(int x, int y, const ugu::Vec3f& plane,
int half_patch_size, int width, bool is_right) {
ugu::Vec3f normal;
RecoverNormalFromPlane(plane, &normal);
if (normal[2] > 0) {
return false;
}
float len = std::sqrt(normal[0] * normal[0] + normal[1] * normal[1] +
normal[2] * normal[2]);
if (std::abs(len - 1.0f) > 0.001f) {
return false;
}
float d = x * plane[0] + y * plane[1] + plane[2];
if (!ugu::ValidateDisparity(x, d, half_patch_size, width, is_right)) {
return false;
}
return true;
}
inline bool AssertPlaneImage(const ugu::Image3f& plane, int half_patch_size,
bool is_right) {
#if _DEBUG
for (int j = half_patch_size; j < plane.rows - half_patch_size; j++) {
for (int i = half_patch_size; i < plane.cols - half_patch_size; i++) {
const auto& p = plane.at<ugu::Vec3f>(j, i);
if (!ValidatePlane(i, j, p, half_patch_size, plane.cols, is_right)) {
printf("%d (%f %f %f) %d %d %d\n", i, p[0], p[1], p[2], half_patch_size,
plane.cols, is_right);
ugu::Vec3f normal;
RecoverNormalFromPlane(p, &normal);
float len = std::sqrt(normal[0] * normal[0] + normal[1] * normal[1] +
normal[2] * normal[2]);
printf("(%f %f %f) %f\n", normal[0], normal[1], normal[2], len);
float d = i * p[0] + j * p[1] + p[2];
printf("%f %d\n", d,
ugu::ValidateDisparity(i, d, half_patch_size, plane.cols,
is_right));
assert(false);
return false;
}
}
}
#else
(void)plane;
(void)half_patch_size;
(void)is_right;
#endif
return true;
}
} // namespace
namespace ugu {
using PlaneImage = Image3f;
inline float L1(const Vec3b& a, const Vec3b& b) {
int val = 0;
val += std::abs(a[0] - b[0]);
val += std::abs(a[1] - b[1]);
val += std::abs(a[2] - b[2]);
return static_cast<float>(val);
}
struct Correspondence {
// first2second[y][first_x] -> list of matching second_x
std::vector<std::vector<std::vector<int>>> first2second;
void Update(const Image1f& disparity2, const int half_patch_size) {
// Init
if (first2second.size() != static_cast<size_t>(disparity2.rows)) {
first2second.clear();
first2second.resize(disparity2.rows);
std::for_each(first2second.begin(), first2second.end(),
[&](std::vector<std::vector<int>>& col) {
col.resize(disparity2.cols);
});
} else {
for (int j = 0; j < disparity2.rows; j++) {
for (int i = 0; i < disparity2.cols; i++) {
first2second[j][i].clear();
}
}
}
for (int j = 0; j < disparity2.rows; j++) {
for (int i = 0; i < disparity2.cols; i++) {
float d = disparity2.at<float>(j, i);
#if 1
float rx = std::round(i - d);
rx = std::max(std::min(rx, static_cast<float>(disparity2.cols - 1 -
half_patch_size)),
static_cast<float>(half_patch_size));
int rx_i = static_cast<int>(rx);
first2second[j][rx_i].push_back(i);
#else
{
float rx = std::floor(i - d);
rx = std::max(std::min(rx, static_cast<float>(disparity2.cols - 1 -
half_patch_size)),
static_cast<float>(half_patch_size));
int rx_i = static_cast<int>(rx);
first2second[j][rx_i].push_back(i);
}
{
float rx = std::ceil(i - d);
rx = std::max(std::min(rx, static_cast<float>(disparity2.cols - 1 -
half_patch_size)),
static_cast<float>(half_patch_size));
int rx_i = static_cast<int>(rx);
first2second[j][rx_i].push_back(i);
}
#endif
}
}
}
void Get(int x, int y, std::vector<int>* match_x_list) const {
match_x_list->clear();
std::copy(first2second[y][x].begin(), first2second[y][x].end(),
std::back_inserter(*match_x_list));
}
};
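// Editor's note: Update() builds the reverse lookup. For each pixel i
// of the *second* view at row y with disparity d, its match column in
// the first view is round(i - d), clamped to the valid patch range,
// and i is appended to that column's list. ViewPropagation later
// queries this table to find second-view pixels matching (x, y).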
inline float Rho(int lx, int y, float rx, const Image3b& right,
const Image1f& left_grad, const Image1f& right_grad,
float alpha, float tau_col, float tau_grad,
const Vec3b& left_y_lx) {
#if 1
int rxmin = static_cast<int>(std::floor(rx));
int rxmax = rxmin + 1;
float w[2] = {rxmax - rx, rx - rxmin}; // lerp weights for rxmin/rxmax
float l1_color0 = L1(left_y_lx, right.at<Vec3b>(y, rxmin));
float l1_color1 = L1(left_y_lx, right.at<Vec3b>(y, rxmax));
float l1_color = w[0] * l1_color0 + w[1] * l1_color1;
float color_term = (1.0f - alpha) * std::min(l1_color, tau_col);
const float& lg = left_grad.at<float>(y, lx);
float absdiff_grad0 = std::abs(lg - right_grad.at<float>(y, rxmin));
float absdiff_grad1 = std::abs(lg - right_grad.at<float>(y, rxmax));
float absdiff_grad = w[0] * absdiff_grad0 + w[1] * absdiff_grad1;
float grad_term = alpha * std::min(absdiff_grad, tau_grad);
#else
float rx_i = static_cast<int>(std::round(rx));
float l1_color = L1(left_y_lx, right.at<Vec3b>(y, rx_i));
float color_term = (1.0f - alpha) * std::min(l1_color, tau_col);
const float& lg = left_grad.at<float>(y, lx);
float absdiff_grad = std::abs(lg - right_grad.at<float>(y, rx_i));
float grad_term = alpha * std::min(absdiff_grad, tau_grad);
#endif // 0
return color_term + grad_term;
}
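// Editor's note: Rho() is the per-pixel dissimilarity from the paper,
// rho(p, q) = (1 - alpha) * min(||I_l(p) - I_r(q)||_1, tau_col)
//           + alpha * min(|grad_l(p) - grad_r(q)|, tau_grad),
// with the right-view color and gradient linearly interpolated
// between the two integer columns bracketing the subpixel match rx.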
inline float CalcPatchMatchCost(
const Vec3f& plane, const Image3b& left, const Image3b& right,
const Image1f& left_grad, const Image1f& right_grad, int posx, int posy,
int minx, int maxx, int miny, int maxy, float gamma, float alpha,
float tau_col, float tau_grad, int half_patch_size,
float early_terminate_th = std::numeric_limits<float>::max()) {
const float inverse_gamma = 1.0f / gamma;
float cost_val = 0.0f;
const Vec3b& lc = left.at<Vec3b>(posy, posx);
const float width = static_cast<float>(left.cols - 1 - half_patch_size);
// For l1 skip
const float w_skip = 0.01f;
const float l1_skip_th = -std::log(w_skip) * gamma;
//#pragma omp parallel for reduction(+:cost_val)
for (int j = miny; j <= maxy; j++) {
for (int i = minx; i <= maxx; i++) {
// Question: Take color of the same image. Is this right?
const Vec3b& rc = left.at<Vec3b>(j, i);
float l1 = L1(lc, rc);
// Skip if l1 is too big -> w is small
// -> w * rho is zero -> cost_val doesn't change
if (l1_skip_th < l1) {
continue;
}
float w = std::exp(-l1 * inverse_gamma);
float d = plane[0] * i + plane[1] * j + plane[2];
float rx = static_cast<float>(i - d);
// rx may be outside of image.
// Because original plane is defined for (posx, posy)
// but we are evaluating its surrounding pixels.
// TODO: better guard
rx = std::max(std::min(rx, width), static_cast<float>(half_patch_size));
float rho_val = Rho(i, j, rx, right, left_grad, right_grad, alpha,
tau_col, tau_grad, rc);
cost_val += (w * rho_val);
}
if (early_terminate_th < cost_val) {
// ugu::LOGI("early stopped %f < %f (%d < %d < %d)\n", early_terminate_th,
// cost_val, miny, j, maxy);
return cost_val;
}
}
return cost_val;
}
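// Editor's note: the aggregated cost above implements
// m(p, f) = sum_{q in window} w(p, q) * rho(q, q - d_f(q)) with the
// adaptive support weight w(p, q) = exp(-||I(p) - I(q)||_1 / gamma).
// Two shortcuts: pixels whose weight would fall below w_skip (~0.01)
// are skipped outright, and the row loop aborts once the partial sum
// exceeds early_terminate_th (the best cost found so far).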
inline bool CalcPatchMatchCost(const Image3f& plane, const Image3b& left,
const Image3b& right, const Image1f& left_grad,
const Image1f& right_grad, Image1f* cost,
const PatchMatchStereoParam& param) {
Timer<> timer;
timer.Start();
const int half_ps = param.patch_size / 2;
#ifdef UGU_USE_OPENMP
#pragma omp parallel for
#endif
for (int j = half_ps; j < plane.rows - half_ps; j++) {
for (int i = half_ps; i < plane.cols - half_ps; i++) {
int minx = i - half_ps;
int maxx = i + half_ps;
int miny = j - half_ps;
int maxy = j + half_ps;
const Vec3f& p = plane.at<Vec3f>(j, i);
float c = CalcPatchMatchCost(
p, left, right, left_grad, right_grad, i, j, minx, maxx, miny, maxy,
param.gamma, param.alpha, param.tau_col, param.tau_grad, half_ps);
cost->at<float>(j, i) = c;
}
}
timer.End();
ugu::LOGI("CalcPatchMatchCost: %.2f ms\n", timer.elapsed_msec());
return true;
}
inline bool InitPlaneRandom(Image3f* plane_image, Image1f* disparity,
std::default_random_engine& engine,
float initial_random_disparity_range,
bool fronto_parallel, int half_patch_size,
bool is_right, float theta_deg_min = 0.0f,
float theta_deg_max = 75.0f,
float phi_deg_min = 0.0f,
float phi_deg_max = 360.0f) {
Timer<> timer;
timer.Start();
initial_random_disparity_range =
initial_random_disparity_range > 0.0f
? initial_random_disparity_range
: static_cast<float>(plane_image->rows - 1);
std::uniform_real_distribution<float> theta_dist(radians(theta_deg_min),
radians(theta_deg_max));
std::uniform_real_distribution<float> phi_dist(radians(phi_deg_min),
radians(phi_deg_max));
std::vector<std::uniform_real_distribution<float>> disparity_dists(
plane_image->cols);
const float eps = 0.01f;
for (int i = half_patch_size; i < plane_image->cols - half_patch_size; i++) {
float disparity_max = 1.0f;
// Maximum disparity depends on left or right
if (is_right) {
disparity_max = std::min(initial_random_disparity_range,
static_cast<float>(plane_image->cols - i -
half_patch_size + 1 - eps));
} else {
disparity_max =
std::min(initial_random_disparity_range,
static_cast<float>(i - half_patch_size + 1 - eps));
}
std::uniform_real_distribution<float> disparity_dist(0.000001f,
disparity_max);
disparity_dists[i] = disparity_dist;
}
for (int j = half_patch_size; j < plane_image->rows - half_patch_size; j++) {
for (int i = half_patch_size; i < plane_image->cols - half_patch_size;
i++) {
std::uniform_real_distribution<float>& disparity_dist =
disparity_dists[i];
float& d = disparity->at<float>(j, i);
d = disparity_dist(engine);
// Set disparity as negative for right image
if (is_right) {
d *= -1.0f;
}
Eigen::Vector3f n(0.0f, 0.0f, -1.0f);
if (!fronto_parallel) {
// z must be facing (negative), cos(theta) < 0
float theta = theta_dist(engine);
float phi = phi_dist(engine);
n[0] = sin(theta) * cos(phi);
n[1] = sin(theta) * sin(phi);
n[2] = -cos(theta);
n.normalize();
}
Vec3f& p = plane_image->at<Vec3f>(j, i);
float inverse_normal_z = 1.0f / n[2];
p[0] = -n[0] * inverse_normal_z;
p[1] = -n[1] * inverse_normal_z;
p[2] = (n[0] * i + n[1] * j + n[2] * d) * inverse_normal_z;
}
}
AssertDisparity(*disparity, half_patch_size, is_right);
AssertPlaneImage(*plane_image, half_patch_size, is_right);
timer.End();
ugu::LOGI("InitPlaneRandom: %.2f ms\n", timer.elapsed_msec());
return true;
}
inline bool SpatialPropagation(int nowx, int nowy, int fromx, int fromy,
const Image3b& first, const Image3b& second,
const Image1f& grad1, const Image1f& grad2,
Image1f* disparity1, Image1f* cost1,
PlaneImage* plane1,
const PatchMatchStereoParam& param,
bool is_right) {
float& now_cost = cost1->at<float>(nowy, nowx);
if (now_cost < 0.0000001f) {
return true;
}
const int half_ps = param.patch_size / 2;
int minx = nowx - half_ps;
int maxx = nowx + half_ps;
int miny = nowy - half_ps;
int maxy = nowy + half_ps;
const Vec3f& p = plane1->at<Vec3f>(fromy, fromx);
float d = p[0] * nowx + p[1] * nowy + p[2];
if (!ValidateDisparity(nowx, d, half_ps, first.cols, is_right)) {
return false;
}
// TODO: Integral image like acceleration of original PatchMatch
float from_cost =
CalcPatchMatchCost(p, first, second, grad1, grad2, nowx, nowy, minx, maxx,
miny, maxy, param.gamma, param.alpha, param.tau_col,
param.tau_grad, half_ps, now_cost);
if (from_cost < now_cost) {
if (is_right) {
// printf("cost %f -> %f\n", now_cost, from_cost);
// printf("disp %f -> %f\n", disparity1->at<float>(nowy, nowx), d);
}
now_cost = from_cost;
plane1->at<Vec3f>(nowy, nowx) = p;
disparity1->at<float>(nowy, nowx) = d;
}
return true;
}
inline bool ViewPropagation(int nowx, int nowy, const Image3b& first,
const Image3b& second, const Image1f& grad1,
Image1f* disparity1, Image1f* cost1,
PlaneImage* plane1, const Image1f& grad2,
Image1f* disparity2, PlaneImage* plane2,
const PatchMatchStereoParam& param,
const Correspondence& first2second, bool is_right) {
const int half_ps = param.patch_size / 2;
float& now_cost = cost1->at<float>(nowy, nowx);
Vec3f& now_p = plane1->at<Vec3f>(nowy, nowx);
if (now_cost < 0.0000001f) {
return true;
}
std::vector<int> match_x_list;
first2second.Get(nowx, nowy, &match_x_list);
int minx = nowx - half_ps;
int maxx = nowx + half_ps;
int miny = nowy - half_ps;
int maxy = nowy + half_ps;
for (const int x2 : match_x_list) {
const Vec3f& p2 = plane2->at<Vec3f>(nowy, x2);
const float org_d = disparity2->at<float>(nowy, x2);
// Transform to first view
Vec3f trans_p;
trans_p[0] = p2[0];
trans_p[1] = p2[1];
trans_p[2] = static_cast<float>(static_cast<double>(-org_d) -
static_cast<double>(trans_p[0]) * nowx -
static_cast<double>(trans_p[1]) * nowy);
float d =
static_cast<float>(static_cast<double>(trans_p[0]) * nowx +
static_cast<double>(trans_p[1]) * nowy + trans_p[2]);
assert(std::abs(d + org_d) < 0.01f);
if (!ValidateDisparity(nowx, d, half_ps, first.cols, is_right)) {
continue;
}
float trans_cost =
CalcPatchMatchCost(trans_p, first, second, grad1, grad2, nowx, nowy,
minx, maxx, miny, maxy, param.gamma, param.alpha,
param.tau_col, param.tau_grad, half_ps, now_cost);
if (trans_cost < now_cost) {
now_cost = trans_cost;
now_p = trans_p;
disparity1->at<float>(nowy, nowx) = d;
}
}
#if 0
float d = now_p[0] * nowx + now_p[1] * nowy + now_p[2];
float rx = std::round(nowx - d);
// rx may be outside of image.
// Because original plane is defined for (posx, posy)
// but we are evaluating its surrounding pixels.
// TODO: better guard
rx = std::max(std::min(rx, static_cast<float>(second.cols - 1)), 0.0f);
int rx_i = static_cast<int>(rx);
float& trans_cost = cost2->at<float>(nowy, rx_i);
if (trans_cost < now_cost) {
}
#endif // 0
return true;
}
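// Editor's note: view propagation re-expresses a matching pixel's
// plane in this view. Since disparities flip sign between views,
// (a, b) are kept and c is recomputed so that the plane evaluated at
// (nowx, nowy) yields d = -org_d, as the assert above verifies.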
inline bool LeftRightConsistencyCheck(Image1f* ldisparity, Image1f* rdisparity,
Image1b* lvalid_mask,
Image1b* rvalid_mask,
float left_right_consistency_th,
int half_patch_size) {
Timer<> timer;
timer.Start();
if (lvalid_mask->rows != ldisparity->rows ||
lvalid_mask->cols != ldisparity->cols) {
*lvalid_mask = Image1b::zeros(ldisparity->rows, ldisparity->cols);
}
lvalid_mask->setTo(255);
if (rvalid_mask->rows != rdisparity->rows ||
rvalid_mask->cols != rdisparity->cols) {
*rvalid_mask = Image1b::zeros(rdisparity->rows, rdisparity->cols);
}
rvalid_mask->setTo(255);
// Check left to right
for (int j = 0; j < ldisparity->rows; j++) {
for (int i = 0; i < ldisparity->cols; i++) {
float& ld = ldisparity->at<float>(j, i);
float rx = std::round(i - ld);
rx = std::max(std::min(rx, static_cast<float>(rdisparity->cols - 1 -
half_patch_size)),
static_cast<float>(half_patch_size));
int rx_i = static_cast<int>(rx);
float& rd = rdisparity->at<float>(j, rx_i);
// Left disparity is plus, right one is negative
float diff = std::abs(ld + rd);
if (left_right_consistency_th < diff) {
ld = std::numeric_limits<float>::max();
rd = std::numeric_limits<float>::lowest();
lvalid_mask->at<unsigned char>(j, i) = 0;
// rvalid_mask->at<unsigned char>(j, rx_i) = 0;
}
}
}
// Check right to left
for (int j = 0; j < rdisparity->rows; j++) {
for (int i = 0; i < rdisparity->cols; i++) {
float& rd = rdisparity->at<float>(j, i);
float lx = std::round(i - rd);
lx = std::max(std::min(lx, static_cast<float>(ldisparity->cols - 1 -
half_patch_size)),
static_cast<float>(half_patch_size));
int lx_i = static_cast<int>(lx);
float& ld = ldisparity->at<float>(j, lx_i);
// Left disparity is plus, right one is negative
float diff = std::abs(ld + rd);
if (left_right_consistency_th < diff) {
ld = std::numeric_limits<float>::max();
rd = std::numeric_limits<float>::lowest();
rvalid_mask->at<unsigned char>(j, i) = 0;
// lvalid_mask->at<unsigned char>(j, lx_i) = 0;
}
}
}
timer.End();
ugu::LOGI("LeftRightConsistencyCheck: %.2f ms\n", timer.elapsed_msec());
return true;
}
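// Editor's note: a left pixel x survives only if its round-trip
// partner agrees, i.e. |d_L(x) + d_R(round(x - d_L(x)))| <= th
// (left disparities are positive, right ones negative, so a
// consistent pair sums to ~0). The symmetric check runs right-to-left.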
inline bool FillHoleNn(Image1f* disparity, Image3f* plane,
const Image1b& valid_mask, int half_patch_size,
bool is_right) {
// TODO: Faster algorithm
Timer<> timer;
timer.Start();
Image1b valid_mask_ = Image1b::zeros(valid_mask.rows, valid_mask.cols);
valid_mask.copyTo(valid_mask_);
for (int j = 0; j < disparity->rows; j++) {
for (int i = 0; i < disparity->cols; i++) {
unsigned char& v = valid_mask_.at<unsigned char>(j, i);
if (v == 255) {
continue;
}
bool updated = false;
float& d = disparity->at<float>(j, i);
Vec3f& p = plane->at<Vec3f>(j, i);
float ld = std::numeric_limits<float>::max();
Vec3f lp;
lp[0] = 0.0f;
lp[1] = 0.0f;
lp[2] = 0.0f;
for (int l = i - 1; 0 <= l; l--) {
unsigned char lv = valid_mask_.at<unsigned char>(j, l);
if (lv == 255) {
const Vec3f& tmp_p = plane->at<Vec3f>(j, l);
float tmp_d = tmp_p[0] * i + tmp_p[1] * j + tmp_p[2];
if (!ValidateDisparity(l, tmp_d, half_patch_size, disparity->cols,
is_right)) {
continue;
}
lp = tmp_p;
ld = tmp_d;
v = 255;
updated = true;
break;
}
}
float rd = std::numeric_limits<float>::max();
Vec3f rp;
rp[0] = 0.0f;
rp[1] = 0.0f;
rp[2] = 0.0f;
for (int r = i + 1; r < disparity->cols; r++) { // scan right along the row
unsigned char rv = valid_mask_.at<unsigned char>(j, r);
if (rv == 255) {
const Vec3f& tmp_p = plane->at<Vec3f>(j, r);
float tmp_d = tmp_p[0] * i + tmp_p[1] * j + tmp_p[2];
if (!ValidateDisparity(r, tmp_d, half_patch_size, disparity->cols,
is_right)) {
continue;
}
rp = tmp_p;
rd = tmp_d;
v = 255;
updated = true;
break;
}
}
// If no valid plane, give up filling hole
if (!updated) {
continue;
}
// Prefer the smaller-magnitude (background) disparity
if ((!is_right && rd < ld) || (is_right && rd > ld)) {
p = rp;
d = rd;
} else {
p = lp;
d = ld;
}
}
}
timer.End();
ugu::LOGI("FillHoleNn: %.2f ms\n", timer.elapsed_msec());
return true;
}
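// Editor's note: invalidated pixels are usually occlusions, which
// belong to the more distant surface, so the fill keeps whichever of
// the left/right candidate planes yields the smaller-magnitude
// disparity (for the right view disparities are negative, hence the
// flipped comparison above).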
inline bool WeightedMedianForFilled(Image1f* disparity, const Image3b& color,
const Image1b& valid_mask,
const PatchMatchStereoParam& param,
bool is_right) {
Timer<> timer;
timer.Start();
const float inverse_gamma = 1.0f / param.gamma;
const int hk = param.patch_size / 2;
Image1f org_disparity = Image1f::zeros(valid_mask.rows, valid_mask.cols);
disparity->copyTo(org_disparity);
#ifdef UGU_USE_OPENMP
#pragma omp parallel for
#endif
for (int j = 0; j < disparity->rows; j++) {
int miny = std::max(j - hk, 0);
int maxy = std::min(j + hk, disparity->rows - 1);
for (int i = 0; i < disparity->cols; i++) {
unsigned char v = valid_mask.at<unsigned char>(j, i);
if (v == 255) {
continue;
}
const Vec3b& c = color.at<Vec3b>(j, i);
int minx = std::max(i - hk, 0);
int maxx = std::min(i + hk, disparity->cols - 1);
std::vector<float> data;
std::vector<float> weights;
for (int jj = miny; jj <= maxy; jj++) {
for (int ii = minx; ii <= maxx; ii++) {
#if 0
// Question: skip invalid pixels. Is this right?
if (valid_mask.at<unsigned char>(jj, ii) == 0) {
continue;
}
#endif
data.push_back(org_disparity.at<float>(jj, ii));
const Vec3b& near_c = color.at<Vec3b>(jj, ii);
float l1 = L1(c, near_c);
float w = std::exp(-l1 * inverse_gamma);
weights.push_back(w);
}
}
#if 0
if (weights[0] < 1 && WeightedMedian(data, weights) > 0) {
for (int k = 0; k < data.size(); k++) {
ugu::LOGI("%f %f\n", data[k], weights[k]);
}
ugu::LOGI("%f -> %f\n", disparity->at<float>(j, i),
WeightedMedian(data, weights));
}
#endif // 0
if (!data.empty()) {
disparity->at<float>(j, i) = ugu::WeightedMedian(data, weights);
} else {
disparity->at<float>(j, i) = is_right
? std::numeric_limits<float>::lowest()
: std::numeric_limits<float>::max();
}
}
}
timer.End();
ugu::LOGI("WeightedMedianForFilled: %.2f ms\n", timer.elapsed_msec());
return true;
}
inline bool RandomSearchPlaneRefinement(
int nowx, int nowy, const Image3b& first, const Image3b& second,
const Image1f& grad1, Image1f* disparity1, Image1f* cost1,
PlaneImage* plane1, const Image1f& grad2,
const PatchMatchStereoParam& param, std::default_random_engine& engine,
std::uniform_real_distribution<float>& uniform_dist, float z_max,
float theta_deg_max, float phi_deg_max, bool is_right) {
const int half_ps = param.patch_size / 2;
float& now_cost = cost1->at<float>(nowy, nowx);
if (now_cost < 0.0000001f) {
// return true;
}
int minx = nowx - half_ps;
int maxx = nowx + half_ps;
int miny = nowy - half_ps;
int maxy = nowy + half_ps;
float& now_d = disparity1->at<float>(nowy, nowx);
float random_d = now_d;
Vec3f& now_p = plane1->at<Vec3f>(nowy, nowx);
Vec3f random_p = now_p;
Vec3f normal_f;
Vec3d normal;
RecoverNormalFromPlane(random_p, &normal_f);
normal[0] = normal_f[0];
normal[1] = normal_f[1];
normal[2] = normal_f[2];
float random_d_max, random_d_min;
if (is_right) {
random_d_max = now_d + z_max <= 0.0f ? now_d + z_max : 0.0f;
random_d_min = (now_d - z_max) > -(first.cols - nowx - half_ps + 1)
? now_d - z_max
: -(first.cols - nowx - half_ps + 1);
} else {
random_d_max = (nowx - half_ps + 1) > (now_d + z_max)
? (now_d + z_max)
: (nowx - half_ps + 1);
random_d_min = now_d - z_max >= 0.0f ? now_d - z_max : 0.0f;
}
assert(random_d_max > random_d_min);
float r = uniform_dist(engine); // 0~1
random_d = (random_d_max - random_d_min) * r + random_d_min;
assert(int(nowx - random_d) >= 0);
assert(int(nowx - random_d) <= first.cols - 1);
float theta = static_cast<float>(acos(-normal[2])); // positive
float phi = static_cast<float>(atan2(normal[1], normal[0]));
float rtheta_min = theta - radians(theta_deg_max) >= 0.0f
? theta - radians(theta_deg_max)
: 0.0f;
const float theta_deg_max_internal = 89.0f;
float rtheta_max =
theta + radians(theta_deg_max) <= radians(theta_deg_max_internal)
? theta + radians(theta_deg_max)
: radians(theta_deg_max_internal);
r = uniform_dist(engine);
theta = (rtheta_max - rtheta_min) * r + rtheta_min;
assert(theta >= 0.0f && theta <= pi * 0.5);
// theta += static_cast<float>(pi * 0.5f);
r = uniform_dist(engine);
r = 2.0f * r - 1.0f;
phi = radians(phi_deg_max) * r + phi;
normal[0] = sin(theta) * cos(phi);
normal[1] = sin(theta) * sin(phi);
normal[2] = -cos(theta);
assert(normal[2] <= 0.0f && normal[2] >= -1.0f);
double len = std::sqrt(normal[0] * normal[0] + normal[1] * normal[1] +
normal[2] * normal[2]);
normal[0] /= len;
normal[1] /= len;
normal[2] /= len;
double inverse_normal_z = 1.0 / normal[2];
random_p[0] = static_cast<float>(-normal[0] * inverse_normal_z);
random_p[1] = static_cast<float>(-normal[1] * inverse_normal_z);
random_p[2] = static_cast<float>(
(normal[0] * nowx + normal[1] * nowy + normal[2] * random_d) *
inverse_normal_z);
float random_cost =
CalcPatchMatchCost(random_p, first, second, grad1, grad2, nowx, nowy,
minx, maxx, miny, maxy, param.gamma, param.alpha,
param.tau_col, param.tau_grad, half_ps, now_cost);
if (random_cost < now_cost) {
now_cost = random_cost;
now_p = random_p;
now_d = random_d;
}
return true;
}
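// Editor's note: this is the plane refinement step of the paper:
// disparity is resampled uniformly within [now_d - z_max, now_d + z_max]
// (clamped to the valid range for this view) and the normal's spherical
// angles are perturbed within shrinking bounds. The caller halves z_max,
// theta_deg_max and phi_deg_max every iteration, so the search decays
// from global to subpixel-local.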
inline bool DebugDump(const Image1f& disparity, const Image1f& cost,
const std::string& prefix, bool is_right) {
Image1b vis_disp = Image1b::zeros(disparity.rows, disparity.cols);
Image1f tmp_disp = Image1f::zeros(disparity.rows, disparity.cols);
disparity.copyTo(tmp_disp);
if (is_right) {
for (int j = 0; j < tmp_disp.rows; j++) {
for (int i = 0; i < tmp_disp.cols; i++) {
tmp_disp.at<float>(j, i) *= -1.0f;
}
}
}
Depth2Gray(tmp_disp, &vis_disp, 0.0f, 255.0f / 3);
ugu::imwrite("disp_" + prefix + ".png", vis_disp);
Image3b vis_cost = Image3b::zeros(disparity.rows, disparity.cols);
VisualizeCost(cost, &vis_cost, 0, 100);
ugu::imwrite("cost_" + prefix + ".png", vis_cost);
return true;
}
inline bool DebugDump(const Image1f& disparity, const Image1f& cost, int iter,
int stage, bool is_right) {
std::string prefix = is_right ? "r_" : "l_";
prefix = prefix + std::to_string(iter) + "_" + std::to_string(stage);
DebugDump(disparity, cost, prefix, is_right);
return true;
}
inline bool ComputePatchMatchStereoImplBodyFromUpperLeft(
const Image3b& first, const Image3b& second, const Image1f& grad1,
Image1f* disparity1, Image1f* cost1, PlaneImage* plane1,
const Image1f& grad2, Image1f* disparity2, Image1f* cost2,
PlaneImage* plane2, const PatchMatchStereoParam& param,
const Correspondence& first2second, std::default_random_engine& engine,
std::uniform_real_distribution<float>& uniform_dist, float z_max,
float theta_deg_max, float phi_deg_max, bool is_right, int iter) {
Timer<> timer;
timer.Start();
(void)cost2;
const int half_ps = param.patch_size / 2;
const int w = first.cols;
const int h = first.rows;
for (int j = half_ps; j < h - half_ps; j++) {
if (param.debug && (j - half_ps) % param.debug_step == 0) {
int stage = (j - half_ps) / param.debug_step;
DebugDump(*disparity1, *cost1, iter, stage, is_right);
}
for (int i = half_ps; i < w - half_ps; i++) {
// Spatial propagation
SpatialPropagation(i, j, i - 1, j, first, second, grad1, grad2,
disparity1, cost1, plane1, param, is_right);
SpatialPropagation(i, j, i, j - 1, first, second, grad1, grad2,
disparity1, cost1, plane1, param, is_right);
// View propagation
if (param.view_propagation) {
ViewPropagation(i, j, first, second, grad1, disparity1, cost1, plane1,
grad2, disparity2, plane2, param, first2second,
is_right);
}
// Temporal propagation
if (param.temporal_propagation) {
}
// Plane refinement (random re-initialization)
if (param.plane_refinement) {
RandomSearchPlaneRefinement(
i, j, first, second, grad1, disparity1, cost1, plane1, grad2, param,
engine, uniform_dist, z_max, theta_deg_max, phi_deg_max, is_right);
}
}
}
timer.End();
ugu::LOGI("ComputePatchMatchStereoImplBodyFromUpperLeft: %.2f ms\n",
timer.elapsed_msec());
return true;
}
inline bool ComputePatchMatchStereoImplBodyFromLowerRight(
const Image3b& first, const Image3b& second, const Image1f& grad1,
Image1f* disparity1, Image1f* cost1, PlaneImage* plane1,
const Image1f& grad2, Image1f* disparity2, Image1f* cost2,
PlaneImage* plane2, const PatchMatchStereoParam& param,
const Correspondence& first2second, std::default_random_engine& engine,
std::uniform_real_distribution<float>& uniform_dist, float z_max,
float theta_deg_max, float phi_deg_max, bool is_right, int iter) {
Timer<> timer;
timer.Start();
(void)cost2;
const int half_ps = param.patch_size / 2;
const int w = first.cols;
const int h = first.rows;
for (int j = h - half_ps - 1; half_ps < j; j--) {
if (param.debug && ((h - half_ps - 1) - j) % param.debug_step == 0) {
int stage = ((h - half_ps - 1) - j) / param.debug_step;
DebugDump(*disparity1, *cost1, iter, stage, is_right);
}
for (int i = w - half_ps - 1; half_ps < i; i--) {
// Spatial propagation
SpatialPropagation(i, j, i + 1, j, first, second, grad1, grad2,
disparity1, cost1, plane1, param, is_right);
SpatialPropagation(i, j, i, j + 1, first, second, grad1, grad2,
disparity1, cost1, plane1, param, is_right);
// View propagation
if (param.view_propagation) {
ViewPropagation(i, j, first, second, grad1, disparity1, cost1, plane1,
grad2, disparity2, plane2, param, first2second,
is_right);
}
// Temporal propagation
if (param.temporal_propagation) {
}
// Plane refinement (random re-initialization)
if (param.plane_refinement) {
RandomSearchPlaneRefinement(
i, j, first, second, grad1, disparity1, cost1, plane1, grad2, param,
engine, uniform_dist, z_max, theta_deg_max, phi_deg_max, is_right);
}
}
}
timer.End();
ugu::LOGI("ComputePatchMatchStereoImplBodyFromLowerRight: %.2f ms\n",
timer.elapsed_msec());
return true;
}
inline bool ComputePatchMatchStereoImpl(const Image3b& left,
const Image3b& right,
Image1f* ldisparity, Image1f* lcost,
Image1f* rdisparity, Image1f* rcost,
Image1f* depth,
const PatchMatchStereoParam& param) {
if (left.rows != right.rows || left.cols != right.cols) {
LOGE("Left and right size missmatch\n");
return false;
}
const int w = left.cols;
const int h = left.rows;
if (ldisparity->rows != h || ldisparity->cols != w) {
*ldisparity = Image1f::zeros(h, w);
}
if (lcost->rows != h || lcost->cols != w) {
*lcost = Image1f::zeros(h, w);
lcost->setTo(std::numeric_limits<float>::max());
}
if (rdisparity->rows != h || rdisparity->cols != w) {
*rdisparity = Image1f::zeros(h, w);
}
if (rcost->rows != h || rcost->cols != w) {
*rcost = Image1f::zeros(h, w);
rcost->setTo(std::numeric_limits<float>::max());
}
if (depth->rows != h || depth->cols != w) {
*depth = Image1f::zeros(h, w);
}
std::default_random_engine engine(param.random_seed);
PlaneImage lplane = Image3f::zeros(left.rows, left.cols);
PlaneImage rplane = Image3f::zeros(left.rows, left.cols);
Image1b left_gray, right_gray;
Color2Gray(left, &left_gray);
Color2Gray(right, &right_gray);
Image1f lgrad, rgrad;
lgrad = Image1f::zeros(left.rows, left.cols);
rgrad = Image1f::zeros(left.rows, left.cols);
SobelX(left_gray, &lgrad);
SobelX(right_gray, &rgrad);
if (param.debug) {
Image1b vis_lgrad, vis_rgrad;
Depth2Gray(lgrad, &vis_lgrad, -200, 200);
Depth2Gray(rgrad, &vis_rgrad, -200, 200);
ugu::imwrite("lgrad.png", vis_lgrad);
ugu::imwrite("rgrad.png", vis_rgrad);
}
// Random Initialization
InitPlaneRandom(&lplane, ldisparity, engine,
param.initial_random_disparity_range,
param.fronto_parallel_window, param.patch_size / 2, false,
param.theta_deg_min, param.theta_deg_max, param.phi_deg_min,
param.phi_deg_max);
CalcPatchMatchCost(lplane, left, right, lgrad, rgrad, lcost, param);
InitPlaneRandom(&rplane, rdisparity, engine,
param.initial_random_disparity_range,
param.fronto_parallel_window, param.patch_size / 2, true,
param.theta_deg_min, param.theta_deg_max, param.phi_deg_min,
param.phi_deg_max);
CalcPatchMatchCost(rplane, right, left, rgrad, lgrad, rcost, param);
Correspondence l2r, r2l;
l2r.Update(*rdisparity, param.patch_size / 2);
r2l.Update(*ldisparity, param.patch_size / 2);
if (param.debug) {
DebugDump(*ldisparity, *lcost, "l_init", false);
DebugDump(*rdisparity, *rcost, "r_init", true);
}
float z_max = param.initial_random_disparity_range > 0.0f
? param.initial_random_disparity_range * 0.5f
: static_cast<float>(left.rows - 1) * 0.5f;
float theta_deg_max = param.theta_deg_max * 0.5f;
// float theta_deg_min = param.theta_deg_min;
float phi_deg_max = param.phi_deg_max * 0.5f;
// float phi_deg_min = param.phi_deg_min;
std::uniform_real_distribution<float> dist(0.0f, 1.0f);
for (int i = 0; i < param.iter; i++) {
ugu::LOGI("iter %d\n", i);
if (param.alternately_reverse && i % 2 == 1) {
// Left
ugu::LOGI("reverse left\n");
ComputePatchMatchStereoImplBodyFromLowerRight(
left, right, lgrad, ldisparity, lcost, &lplane, rgrad, rdisparity,
rcost, &rplane, param, l2r, engine, dist, z_max, theta_deg_max,
phi_deg_max, false, i);
r2l.Update(*ldisparity, param.patch_size / 2);
// Right
ugu::LOGI("reverse right\n");
ComputePatchMatchStereoImplBodyFromLowerRight(
right, left, rgrad, rdisparity, rcost, &rplane, lgrad, ldisparity,
lcost, &lplane, param, r2l, engine, dist, z_max, theta_deg_max,
phi_deg_max, true, i);
l2r.Update(*rdisparity, param.patch_size / 2);
} else {
// Left
ugu::LOGI("left\n");
ComputePatchMatchStereoImplBodyFromUpperLeft(
left, right, lgrad, ldisparity, lcost, &lplane, rgrad, rdisparity,
rcost, &rplane, param, l2r, engine, dist, z_max, theta_deg_max,
phi_deg_max, false, i);
r2l.Update(*ldisparity, param.patch_size / 2);
// Right
ugu::LOGI("right\n");
ComputePatchMatchStereoImplBodyFromUpperLeft(
right, left, rgrad, rdisparity, rcost, &rplane, lgrad, ldisparity,
lcost, &lplane, param, r2l, engine, dist, z_max, theta_deg_max,
phi_deg_max, true, i);
l2r.Update(*rdisparity, param.patch_size / 2);
}
z_max = z_max * 0.5f;
if (z_max < 0.1f) {
z_max = 0.1f;
}
theta_deg_max *= 0.5f;
phi_deg_max *= 0.5f;
AssertDisparity(*ldisparity, param.patch_size / 2, false);
AssertDisparity(*rdisparity, param.patch_size / 2, true);
AssertPlaneImage(lplane, param.patch_size / 2, false);
AssertPlaneImage(rplane, param.patch_size / 2, true);
}
if (param.debug) {
DebugDump(*ldisparity, *lcost, "l_iterend", false);
DebugDump(*rdisparity, *rcost, "r_iterend", true);
}
// Post-processing
if (param.base_param.left_right_consistency) {
Image1b l_valid_mask, r_valid_mask;
LeftRightConsistencyCheck(
ldisparity, rdisparity, &l_valid_mask, &r_valid_mask,
param.base_param.left_right_consistency_th, param.patch_size / 2);
if (param.debug) {
ugu::imwrite("l_valid_mask.png", l_valid_mask);
ugu::imwrite("r_valid_mask.png", r_valid_mask);
DebugDump(*ldisparity, *lcost, "l_lrconsistency", false);
DebugDump(*rdisparity, *rcost, "r_lrconsistency", true);
}
// Hole filling for invalidated pixels
if (param.fill_hole_nn) {
FillHoleNn(ldisparity, &lplane, l_valid_mask, param.patch_size / 2,
false);
FillHoleNn(rdisparity, &rplane, r_valid_mask, param.patch_size / 2, true);
if (param.debug) {
DebugDump(*ldisparity, *lcost, "l_fillholenn", false);
DebugDump(*rdisparity, *rcost, "r_fillholenn", true);
}
// Weighted median filter for filled pixels
if (param.weighted_median_for_filled) {
WeightedMedianForFilled(ldisparity, left, l_valid_mask, param, false);
WeightedMedianForFilled(rdisparity, right, r_valid_mask, param, true);
if (param.debug) {
DebugDump(*ldisparity, *lcost, "l_filled", false);
DebugDump(*rdisparity, *rcost, "r_filled", true);
}
}
}
}
Disparity2Depth(*ldisparity, depth, param.base_param.baseline,
param.base_param.fx, param.base_param.lcx,
param.base_param.rcx, param.base_param.mind,
param.base_param.maxd);
#if 0
Disparity2Depth(*rdisparity, depth, -param.base_param.baseline,
param.base_param.fx, param.base_param.lcx,
param.base_param.rcx, param.base_param.mind,
param.base_param.maxd);
#endif
if (param.debug) {
DebugDump(*ldisparity, *lcost, "l_end", false);
DebugDump(*rdisparity, *rcost, "r_end", true);
}
return true;
}
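// A minimal sketch of the disparity-to-depth model used above, assuming
// rectified pinhole cameras (illustrative only; the library's Disparity2Depth
// additionally clamps the result to [mind, maxd]). With baseline B and focal
// length fx, d = fx * B / Z + (lcx - rcx), hence:
inline float DisparityToDepthSketch(float d, float baseline, float fx,
                                    float lcx, float rcx) {
  const float dd = d - (lcx - rcx);
  return dd > 0.f ? fx * baseline / dd : 0.f;  // non-positive => invalid
}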
} // namespace ugu
|
GB_dense_subassign_22_template.c | //------------------------------------------------------------------------------
// GB_dense_subassign_22_template: C += x where C is dense and x is a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All entries in C+=A are computed fully in parallel, using the same kind of
// parallelism as Template/GB_AxB_colscale.c.
{
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
GB_CTYPE *GB_RESTRICT Cx = C->x ;
const int64_t cnz = GB_NNZ (C) ;
//--------------------------------------------------------------------------
// C += x where C is dense and x is a scalar
//--------------------------------------------------------------------------
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
GB_BINOP (GB_CX (pC), GB_CX (pC), ywork) ;
}
}
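// Illustrative expansion (not part of the template): when this file is
// included with GB_CTYPE = double and GB_BINOP(z,x,y) computing z = x + y,
// the block above reduces to
//
//      double *GB_RESTRICT Cx = C->x ;
//      #pragma omp parallel for num_threads(nthreads) schedule(static)
//      for (int64_t pC = 0 ; pC < cnz ; pC++) Cx [pC] += ywork ;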
|
syr2k.plutotileparallel.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* syr2k.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "syr2k.h"
/* Array initialization. */
static
void init_array(int n, int m,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D(C,N,N,n,n),
DATA_TYPE POLYBENCH_2D(A,N,M,n,m),
DATA_TYPE POLYBENCH_2D(B,N,M,n,m))
{
int i, j;
*alpha = 1.5;
*beta = 1.2;
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
A[i][j] = (DATA_TYPE) ((i*j+1)%n) / n;
B[i][j] = (DATA_TYPE) ((i*j+2)%m) / m;
}
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
C[i][j] = (DATA_TYPE) ((i*j+3)%n) / m;
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("C");
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]);
}
POLYBENCH_DUMP_END("C");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_syr2k(int n, int m,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(C,N,N,n,n),
DATA_TYPE POLYBENCH_2D(A,N,M,n,m),
DATA_TYPE POLYBENCH_2D(B,N,M,n,m))
{
int i, j, k;
//BLAS PARAMS
//UPLO = 'L'
//TRANS = 'N'
//A is NxM
//B is NxM
//C is NxN
int t1, t2, t3, t4, t5, t6, t7;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if (_PB_N >= 1) {
lbp=0;
ubp=floord(_PB_N-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=t2;t3++) {
for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {
lbv=32*t3;
ubv=min(t4,32*t3+31);
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
            C[t4][t5] *= beta;
}
}
}
}
if (_PB_M >= 1) {
lbp=0;
ubp=floord(_PB_N-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=t2;t3++) {
for (t4=0;t4<=floord(_PB_M-1,32);t4++) {
for (t5=32*t2;t5<=min(_PB_N-1,32*t2+31);t5++) {
for (t6=32*t3;t6<=min(t5,32*t3+31);t6++) {
for (t7=32*t4;t7<=min(_PB_M-1,32*t4+31);t7++) {
                C[t5][t6] += A[t6][t7]*alpha*B[t5][t7] + B[t6][t7]*alpha*A[t5][t7];
}
}
}
}
}
}
}
}
/* End of CLooG code */
}
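/* For reference, an untiled sketch of the same computation (illustrative,
   not part of the original benchmark): the CLooG code above is a 32x32
   tiled, OpenMP-parallelized version of this lower-triangular rank-2k
   update, C := beta*C + alpha*A*B^T + alpha*B*A^T. */
#if 0
static
void kernel_syr2k_naive(int n, int m,
                        DATA_TYPE alpha,
                        DATA_TYPE beta,
                        DATA_TYPE POLYBENCH_2D(C,N,N,n,n),
                        DATA_TYPE POLYBENCH_2D(A,N,M,n,m),
                        DATA_TYPE POLYBENCH_2D(B,N,M,n,m))
{
  int i, j, k;
  for (i = 0; i < n; i++) {
    for (j = 0; j <= i; j++)
      C[i][j] *= beta;                         /* scale lower triangle */
    for (k = 0; k < m; k++)
      for (j = 0; j <= i; j++)                 /* rank-2k update */
        C[i][j] += A[j][k]*alpha*B[i][k] + B[j][k]*alpha*A[i][k];
  }
}
#endif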
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
double footprint = 8*(n*n + 2*n*m); // HAVERFORD added code
double FP_ops = 3.0 * m * (n + 1) * n; // HAVERFORD added code
#ifdef POLYBENCH_GFLOPS
polybench_set_program_flops(FP_ops); // HAVERFORD addition
#endif
#if defined POLYFORD_VERBOSE
printf("Starting %s, n=%8d, m=%8d, Footprint %8.4g M, Source FP ops=%8.4g G\n",
__FILE__, n, m, footprint / (1024 * 1024), FP_ops/1000000000.0);
#endif
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,M,n,m);
POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,M,n,m);
/* Initialize array(s). */
init_array (n, m, &alpha, &beta,
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_syr2k (n, m,
alpha, beta,
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
1.norace13.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 20
#define N 20
int main() {
double A[M][N], B[M][N], C[N][N];
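  // Race-free by construction: the parallel i-loop gives each thread
  // distinct rows of A, while B and C are only read.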
#pragma omp parallel for
for (int i = 0; i < M; i++)
for (int j = 0; j < N; j++) {
A[i][j] = 0.0;
for (int k = 0; k < N; ++k)
A[i][j] += B[i][k] * C[k][j];
}
}
// CHECK: Region is Data Race Free.
// END
|
GB_binop__lxor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int64)
// A*D function (colscale): GB (_AxD__lxor_int64)
// D*A function (rowscale): GB (_DxB__lxor_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int64)
// C=scalar+B GB (_bind1st__lxor_int64)
// C=scalar+B' GB (_bind1st_tran__lxor_int64)
// C=A+scalar GB (_bind2nd__lxor_int64)
// C=A'+scalar GB (_bind2nd_tran__lxor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
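// Example: x = 5, y = 0 gives (x != 0) = 1 and (y != 0) = 0, so z = 1 ;
// any nonzero input is treated as boolean true.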
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT64 || GxB_NO_LXOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lxor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|