source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
interpolation_p1.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
static inline void interpolation_p1_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // Interpolate one coarse (read) block onto one fine (write) block with
  // piecewise-linear (P1) weights 27/64, 9/64, 3/64, and 1/64.
  // The fine block is twice the coarse block's extent in each dimension.
  int fdim_i = block->dim.i<<1;
  int fdim_j = block->dim.j<<1;
  int fdim_k = block->dim.k<<1;
  int  read_i = block->read.i;
  int  read_j = block->read.j;
  int  read_k = block->read.k;
  int write_i = block->write.i;
  int write_j = block->write.j;
  int write_k = block->write.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;
  const double * __restrict__ read  = block->read.ptr;
        double * __restrict__ write = block->write.ptr;
  // A non-negative box index means the endpoint is a box (not an MPI buffer):
  // use the box's strides and skip past its ghost zone.
  if(block->read.box >=0){
     read_jStride = level_c->my_boxes[block->read.box ].jStride;
     read_kStride = level_c->my_boxes[block->read.box ].kStride;
     read  = level_c->my_boxes[block->read.box ].vectors[id_c] + level_c->box_ghosts*(1+ read_jStride+ read_kStride);
  }
  if(block->write.box>=0){
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->box_ghosts*(1+write_jStride+write_kStride);
  }

  int i,j,k;
  for(k=0;k<fdim_k;k++){
    const int dk = (k&0x1) ? read_kStride : -read_kStride; // even fine points look backwards, odd look forwards
  for(j=0;j<fdim_j;j++){
    const int dj = (j&0x1) ? read_jStride : -read_jStride;
  for(i=0;i<fdim_i;i++){
    const int di = (i&0x1) ? 1 : -1;
    int write_ijk = ((i   )+write_i) + (((j   )+write_j)*write_jStride) + (((k   )+write_k)*write_kStride);
    int  read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    //   |   o   |   o   |
    //   +---+---+---+---+
    //   |   | x | x |   |
    //
    // CAREFUL !!! the MPI buffers (write[]) and destination boxes must have
    // been zeroed at some point to avoid 0.0*NaN or 0.0*inf.
    // NOTE: coarse-grid BC's must have been previously applied.
    write[write_ijk] = prescale_f*write[write_ijk] +
                       0.421875*read[read_ijk         ] +
                       0.140625*read[read_ijk      +dk] +
                       0.140625*read[read_ijk   +dj   ] +
                       0.046875*read[read_ijk   +dj+dk] +
                       0.140625*read[read_ijk+di      ] +
                       0.046875*read[read_ijk+di   +dk] +
                       0.046875*read[read_ijk+di+dj   ] +
                       0.015625*read[read_ijk+di+dj+dk];
  }}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise linear interpolation
// Perform an inter-level piecewise linear (P1) interpolation: interpolate
// vector id_c on the coarse level (level_c) onto vector id_f on the fine
// level (level_f).  Each fine-grid value is first scaled by prescale_f and
// the interpolant is accumulated into it (see interpolation_p1_block).
void interpolation_p1(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  // the interpolation stencil reads coarse-grid neighbors, so ghost zones and
  // boundary conditions on the coarse grid must be made current first...
  exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
  apply_BCs_p1(level_c,id_c,STENCIL_SHAPE_BOX);

  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;
  int buffer=0;
  int n;
  // message tag is unique to this level (tag<<4) and this operation (0x7)
  int my_tag = (level_f->tag<<4) | 0x7;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
  int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
  MPI_Request *recv_requests = level_f->interpolation.requests;
  MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level_f->interpolation.num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_f->interpolation.num_recvs;n++){
      MPI_Irecv(level_f->interpolation.recv_buffers[n],
                level_f->interpolation.recv_sizes[n],
                MPI_DOUBLE,
                level_f->interpolation.recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers... blocks[0] interpolates coarse data directly into the send buffers
  if(level_c->interpolation.num_blocks[0]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
    for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
      // !!! prescale==0 because you don't want to increment the MPI buffer
      interpolation_p1_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level_c->interpolation.num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_c->interpolation.num_sends;n++){
      MPI_Isend(level_c->interpolation.send_buffers[n],
                level_c->interpolation.send_sizes[n],
                MPI_DOUBLE,
                level_c->interpolation.send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_send += (_timeEnd-_timeStart);
  }
  #endif

  // perform local interpolation (blocks[1] = on-rank coarse->fine copies)... try and hide within Isend latency...
  if(level_c->interpolation.num_blocks[1]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
    for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
      interpolation_p1_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages>0){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
    _timeEnd = getTime();
    level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
  }

  // unpack MPI receive buffers (blocks[2] = fine-level blocks fed from recv buffers)
  if(level_f->interpolation.num_blocks[2]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
    for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
      IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
/*
  One row of the colorspace-transform lookup tables (x_map/y_map/z_map):
  the x, y, and z contributions of a single quantized channel value.
*/
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Bilevel and grayscale content is reported as GRAY; otherwise return the
    colorspace currently recorded on the image.
  */
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    return(GRAYColorspace);
  return(image->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  /*
    CMY is the complement of RGB, normalized to [0,1] by QuantumScale.
  */
  *cyan=(QuantumRange-red)*QuantumScale;
  *magenta=(QuantumRange-green)*QuantumScale;
  *yellow=(QuantumRange-blue)*QuantumScale;
}
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  /*
    Apply the rows of the CAT02 chromatic-adaptation matrix to (X,Y,Z).
  */
  const double rho=0.7328*x+0.4296*y-0.1624*z;
  const double gamma=(-0.7036)*x+1.6975*y+0.0061*z;
  const double beta=0.0030*x+0.0136*y+0.9834*z;

  *L=rho;
  *M=gamma;
  *S=beta;
}
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  /*
    Convert by way of the XYZ colorspace: RGB -> XYZ -> LMS.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToLMS(cie_x,cie_y,cie_z,L,M,S);
}
static void ConvertRGBToLab(const double red,const double green,
  const double blue,double *L,double *a,double *b)
{
  /*
    Convert by way of the XYZ colorspace: RGB -> XYZ -> Lab.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToLab(cie_x,cie_y,cie_z,L,a,b);
}
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  /*
    Convert by way of the XYZ colorspace: RGB -> XYZ -> Luv.
  */
  double
    cie_x,
    cie_y,
    cie_z;

  ConvertRGBToXYZ(red,green,blue,&cie_x,&cie_y,&cie_z);
  ConvertXYZToLuv(cie_x,cie_y,cie_z,L,u,v);
}
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  /*
    Chromaticity coordinates x=X/(X+Y+Z) and y=Y/(X+Y+Z) with luminance Y;
    PerceptibleReciprocal() guards the X+Y+Z == 0 case.
  */
  double
    norm,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  norm=PerceptibleReciprocal(X+Y+Z);
  *low_x=norm*X;
  *low_y=norm*Y;
  *cap_Y=Y;
}
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  /*
    YDbDr: Db and Dr, nominally [-0.5,0.5], are offset by 0.5 into [0,1].
  */
  const double luma=0.298839*red+0.586811*green+0.114350*blue;
  const double db=(-0.450)*red-0.883*green+1.333*blue;
  const double dr=(-1.333)*red+1.116*green+0.217*blue;

  *Y=QuantumScale*luma;
  *Db=QuantumScale*db+0.5;
  *Dr=QuantumScale*dr+0.5;
}
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  /*
    YIQ: I and Q, nominally [-0.5,0.5], are offset by 0.5 into [0,1].
  */
  const double luma=0.298839*red+0.586811*green+0.114350*blue;
  const double in_phase=0.595716*red-0.274453*green-0.321263*blue;
  const double quadrature=0.211456*red-0.522591*green+0.311135*blue;

  *Y=QuantumScale*luma;
  *I=QuantumScale*in_phase+0.5;
  *Q=QuantumScale*quadrature+0.5;
}
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  /*
    YPbPr: Pb and Pr, nominally [-0.5,0.5], are offset by 0.5 into [0,1].
  */
  const double luma=0.298839*red+0.586811*green+0.114350*blue;
  const double pb=(-0.1687367)*red-0.331264*green+0.5*blue;
  const double pr=0.5*red-0.418688*green-0.081312*blue;

  *Y=QuantumScale*luma;
  *Pb=QuantumScale*pb+0.5;
  *Pr=QuantumScale*pr+0.5;
}
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  /*
    YCbCr uses the same RGB transform as YPbPr here, so delegate to it.
  */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  /*
    YUV: U and V, nominally [-0.5,0.5], are offset by 0.5 into [0,1].
  */
  const double luma=0.298839*red+0.586811*green+0.114350*blue;
  const double u=(-0.147)*red-0.289*green+0.436*blue;
  const double v=0.615*red-0.515*green-0.100*blue;

  *Y=QuantumScale*luma;
  *U=QuantumScale*u+0.5;
  *V=QuantumScale*v+0.5;
}
static MagickBooleanType sRGBTransformImage(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
register ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
status=MagickTrue;
progress=0;
switch (colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertRGBToCMYK(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from sRGB to target colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
film_gamma))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) DecodePixelGamma((MagickRealType)
GetPixelRed(image,q));
green=(double) DecodePixelGamma((MagickRealType)
GetPixelGreen(image,q));
blue=(double) DecodePixelGamma((MagickRealType)
GetPixelBlue(image,q));
SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
q);
SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
y_map[i].x=0.010566*i;
z_map[i].x=0.002052*i;
x_map[i].y=(-0.003296)*i;
y_map[i].y=(-0.006471)*i;
z_map[i].y=0.009768*i;
x_map[i].z=0.009410*i;
y_map[i].z=(-0.007880)*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
register unsigned int
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(image,q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(image,q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(image,q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
primary_info.z;
SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,sRGBTransformImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
register unsigned int
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
%       const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    image_type;

  MagickBooleanType
    status;

  /*
    Tag the image with a new colorspace without transforming the pixels;
    gamma, rendering intent, and chromaticity metadata are reset to match
    the target colorspace.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged with this colorspace */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;  /* default sRGB-style encoding gamma */
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  image_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      image_type=GrayscaleType;
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;  /* linear gray carries no encoding gamma */
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear colorspaces carry no encoding gamma */
    else
      {
        /*
          Perceptual colorspaces: Rec. 709/sRGB primaries, D65 white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  status=SyncImagePixelCache(image,exception);
  image->type=image_type;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *property;

  ImageType
    gray_type;

  /*
    Retag the image as grayscale (or bi-level) when every pixel already has
    equal red, green, and blue intensities; otherwise leave it untouched.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);  /* already tagged gray */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  property=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(property) != MagickFalse)
    return(MagickFalse);  /* auto-grayscale explicitly disabled */
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);  /* found at least one non-gray pixel */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *property;

  /*
    Retag the image as bi-level when every pixel is either pure black or
    pure white; otherwise leave it untouched.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already bi-level */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  property=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(property) != MagickFalse)
    return(MagickFalse);  /* auto-grayscale explicitly disabled */
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  /*
    Transform the pixel data into the requested colorspace, pivoting
    through sRGB when neither endpoint is sRGB itself.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Embedded color profiles no longer describe the transformed pixels.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == LinearGRAYColorspace)
    return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod,exception));
  if (colorspace == GRAYColorspace)
    return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod,exception));
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));  /* target is sRGB itself */
  /*
    Pivot: bring the pixels to sRGB first, then on to the target colorspace.
  */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    if (TransformsRGBImage(image,exception) == MagickFalse)
      return(MagickFalse);
  return(sRGBTransformImage(image,colorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  /*
    CMY is the subtractive complement of additive RGB: each channel is one
    minus the corresponding ink coverage, scaled to the quantum range.
  */
  const double
    r=1.0-cyan,
    g=1.0-magenta,
    b=1.0-yellow;

  *red=QuantumRange*r;
  *green=QuantumRange*g;
  *blue=QuantumRange*b;
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  /*
    Map LMS cone-response tristimulus values back to CIE XYZ via the
    inverse cone-response matrix (row order: X, Y, Z).
  */
  static const double
    lms_to_xyz[3][3] =
    {
      {  1.096123820835514, -0.278869000218287, 0.182745179382773 },
      {  0.454369041975359,  0.473533154307412, 0.072097803717229 },
      { -0.009627608738429, -0.005698031216113, 1.015325639954543 }
    };

  double
    *channel[3];

  ssize_t
    i;

  channel[0]=X;
  channel[1]=Y;
  channel[2]=Z;
  for (i=0; i < 3; i++)
    *channel[i]=lms_to_xyz[i][0]*L+lms_to_xyz[i][1]*M+lms_to_xyz[i][2]*S;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  /*
    Convert LMS cone response to RGB by pivoting through CIE XYZ.
  */
  double
    x,
    y,
    z;

  ConvertLMSToXYZ(L,M,S,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,double *red,double *green,double *blue)
{
  /*
    Convert normalized Luv to RGB: rescale L to [0,100] and shift u,v out
    of their [0,1] encoding, then pivot through CIE XYZ.
  */
  double
    x,
    y,
    z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
  /*
    Round half-up to the nearest integer and clamp to the YCC lookup-table
    domain [0,1388].
  */
  if (value >= 1388.0)
    return(1388);
  if (value <= 0.0)
    return(0);
  return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,double *red,double *green,double *blue)
{
  /*
    Convert normalized Lab to RGB: L in [0,1] maps to [0,100]; a and b are
    recentered from [0,1] to [-127.5,127.5].  Pivot through CIE XYZ.
  */
  double
    x,
    y,
    z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  /*
    xyY -> XYZ: X=x*Y/y, Z=(1-x-y)*Y/y; PerceptibleReciprocal guards the
    y==0 case.  Then convert XYZ to RGB.
  */
  double
    denom,
    X,
    Y,
    Z;

  denom=PerceptibleReciprocal(low_y);
  X=denom*cap_Y*low_x;
  Y=cap_Y;
  Z=denom*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  /*
    Invert the YPbPr transform; Pb and Pr are stored biased by 0.5 so they
    fit in an unsigned quantum.
  */
  const double
    pb=Pb-0.5,
    pr=Pr-0.5;

  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr);
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr);
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr);
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  /*
    YCbCr uses the same chroma axes as YPbPr here, so delegate directly to
    the YPbPr converter.
  */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  /*
    Invert the YIQ (NTSC) transform; I and Q are stored biased by 0.5 so
    they fit in an unsigned quantum.
  */
  const double
    i=I-0.5,
    q=Q-0.5;

  *red=QuantumRange*(Y+0.9562957197589482261*i+0.6210244164652610754*q);
  *green=QuantumRange*(Y-0.2721220993185104464*i-0.6473805968256950427*q);
  *blue=QuantumRange*(Y-1.1069890167364901945*i+1.7046149983646481374*q);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  /*
    Invert the YDbDr (SECAM) transform; Db and Dr are stored biased by 0.5
    so they fit in an unsigned quantum.
  */
  const double
    db=Db-0.5,
    dr=Dr-0.5;

  *red=QuantumRange*(Y+9.2303716147657e-05*db-0.52591263066186533*dr);
  *green=QuantumRange*(Y-0.12913289889050927*db+0.26789932820759876*dr);
  *blue=QuantumRange*(Y+0.66467905997895482*db-7.9202543533108e-05*dr);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  /*
    Invert the YUV transform; U and V are stored biased by 0.5 so they fit
    in an unsigned quantum.
  */
  const double
    u=U-0.5,
    v=V-0.5;

  *red=QuantumRange*(Y-3.945707070708279e-05*u+1.1398279671717170825*v);
  *green=QuantumRange*(Y-0.3946101641414141437*u-0.5805003156565656797*v);
  *blue=QuantumRange*(Y+2.0319996843434342537*u-4.813762626262513e-04*v);
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=(MagickRealType) GetPixelGray(image,q);
if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
(image->intensity == Rec709LuminancePixelIntensityMethod))
gray=EncodePixelGamma(gray);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from source colorspace to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * NOTE: *y is normalized in place as a side effect (classic GNU idiom).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds from y in the other direction. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x precedes y in whole seconds after normalization. */
    return x->tv_sec < y->tv_sec;
}
/* Driver for the order-8 (25-point) 3D wave-equation stencil benchmark.
 * Usage: ./3d25pt [Nx Ny Nz [Nt]] -- interior sizes; an 8-cell halo
 * (4 on each face) is added internally.  Runs the kernel TESTS times and
 * reports the best wall-clock time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults keep the run valid when arguments are omitted: the original
     read Nx..Nt uninitialized in that case (undefined behavior). */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time planes A[0]/A[1] and the velocity field roc2.
     (fix) roc2 was previously malloc'd with a bogus size and then
     immediately re-assigned, leaking the first allocation. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize EVERY grid point, halo included.  (fix) The original
     loops started at 1, leaving the index-0 planes uninitialized even
     though the stencil reads them (i-4 == 0 at i == 4), and A[1] --
     read on the very first time step -- was never initialized at all. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  /* num_threads may be consumed by the PRINT_RESULTS macro. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-8 central-difference coefficients. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            /* Leapfrog update: the (t+1)%2 plane holds u(t-1) on entry
               and receives u(t+1). */
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          /* (fix) the top-level pointer array was leaked */
  free(roc2);
  free(tile_size);  /* (fix) tile_size was leaked */
  return 0;
}
|
8393c2f5_gcc_so4_prot.c | #define _POSIX_C_SOURCE 200809L
#define START_TIMER(S) struct timeval start_ ## S , end_ ## S ; gettimeofday(&start_ ## S , NULL);
#define STOP_TIMER(S,T) gettimeofday(&end_ ## S, NULL); T->S += (double)(end_ ## S .tv_sec-start_ ## S.tv_sec)+(double)(end_ ## S .tv_usec-start_ ## S .tv_usec)/1000000;
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
/* Array descriptor handed across the Python/C boundary by the Devito
 * code generator: `data` is the raw element buffer; the int arrays carry
 * its geometry.  NOTE(review): the precise meaning of npsize/dsize/hsize/
 * hofs/oofs (padded sizes, halo sizes/offsets, owned-region offsets) is
 * assumed from Devito conventions -- confirm against the generator. */
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
/* Per-section wall-clock accumulators; Kernel() adds elapsed seconds for
 * its single timed section into `section0`. */
struct profiler
{
double section0;
} ;
/* Devito-generated time-stepping kernel.
 * Advances the wavefield `usol` (triple-buffered over its leading index)
 * through time steps [time_m, time_M]: for each step it applies a
 * finite-difference update with damping over the (x,y,z) grid, then
 * injects point-source contributions selected by the sparse source-mask
 * arrays.  Elapsed wall time for the timed section is accumulated into
 * timers->section0 via the STOP_TIMER macro.  Always returns 0.
 * Machine-generated code: statement order and the flat->typed casts below
 * are intentional; do not hand-restructure. */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads_nonaffine, struct profiler * timers)
{
/* Re-interpret each flat ->data buffer as a (variably-modified)
 * multi-dimensional array using the run-time shapes in ->size. */
int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__ ((aligned (64))) = (int (*)[nnz_sp_source_mask_vec->size[1]]) nnz_sp_source_mask_vec->data;
float (*restrict save_src)[save_src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[save_src_vec->size[1]]) save_src_vec->data;
int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]]) source_id_vec->data;
float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]]) source_mask_vec->data;
int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]]) sp_source_mask_vec->data;
float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]]) usol_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
/* Block-size parameters are unpacked for interface parity with the
 * blocked variants of this kernel; this (non-blocked) loop nest does
 * not use them below. */
int xb_size = block_sizes[0];
int yb_size = block_sizes[1];
int x0_blk0_size = block_sizes[2];
int y0_blk0_size = block_sizes[3];
/* t0 = current, t1 = previous, t2 = next plane of the 3-way circular
 * time buffer over usol's leading index. */
for (int time = time_m, t0 = (time)%(3), t1 = (time + 2)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 2)%(3), t2 = (time + 1)%(3))
{
/* Begin section0 */
START_TIMER(section0)
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(x_M - x_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
for (int x = x_m; x <= x_M; x += 1)
{
#pragma omp simd aligned(damp,nnz_sp_source_mask,save_src,source_id,source_mask,sp_source_mask,usol,vp:32)
for (int y = y_m; y <= y_M; y += 1)
{
/* Stencil update: the +4/+1 index shifts skip the halo/padding
 * layers of the respective arrays. */
for (int z = z_m; z <= z_M; z += 1)
{
float r9 = -2.5F*usol[t0][x + 4][y + 4][z + 4];
float r8 = 1.0/dt;
float r7 = 1.0/(dt*dt);
float r6 = 1.0/(vp[x + 4][y + 4][z + 4]*vp[x + 4][y + 4][z + 4]);
usol[t2][x + 4][y + 4][z + 4] = (r6*(-r7*(-2.0F*usol[t0][x + 4][y + 4][z + 4] + usol[t1][x + 4][y + 4][z + 4])) + r8*(damp[x + 1][y + 1][z + 1]*usol[t0][x + 4][y + 4][z + 4]) + (r9 - 8.33333333e-2F*(usol[t0][x + 4][y + 4][z + 2] + usol[t0][x + 4][y + 4][z + 6]) + 1.33333333F*(usol[t0][x + 4][y + 4][z + 3] + usol[t0][x + 4][y + 4][z + 5]))/((h_z*h_z)) + (r9 - 8.33333333e-2F*(usol[t0][x + 4][y + 2][z + 4] + usol[t0][x + 4][y + 6][z + 4]) + 1.33333333F*(usol[t0][x + 4][y + 3][z + 4] + usol[t0][x + 4][y + 5][z + 4]))/((h_y*h_y)) + (r9 - 8.33333333e-2F*(usol[t0][x + 2][y + 4][z + 4] + usol[t0][x + 6][y + 4][z + 4]) + 1.33333333F*(usol[t0][x + 3][y + 4][z + 4] + usol[t0][x + 5][y + 4][z + 4]))/((h_x*h_x)))/(r6*r7 + r8*damp[x + 1][y + 1][z + 1]);
}
/* Sparse source injection: nnz_sp_source_mask[x][y] counts the
 * nonzero mask entries in this column; each selects a z index. */
int sp_zi_M = nnz_sp_source_mask[x][y] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x][y][sp_zi];
float r0 = save_src[time][source_id[x][y][zind]]*source_mask[x][y][zind];
usol[t2][x + 4][y + 4][zind + 4] += r0;
}
}
}
}
STOP_TIMER(section0,timers)
/* End section0 */
}
return 0;
}
/* Backdoor edit at Wed Jan 20 16:52:34 2021*/
|
rii.h | #ifndef RII_H
#define RII_H
#include <iostream>
#include <cassert>
#include "./pqkmeans.h"
#include "./distance.h"
// For py::array_t
// See http://pybind11.readthedocs.io/en/master/advanced/pycpp/numpy.html#direct-access
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
namespace rii {
struct DistanceTable{
    // Flat M x Ks lookup table of partial (per-subspace) distances.
    // Identical in layout to vec<vec<float>> dt(M, vec<float>(Ks)), but a
    // single contiguous allocation.
    // (fix) Ks_ is now zero-initialized: the default constructor used to
    // leave it indeterminate, so any accidental use of a default-constructed
    // table was undefined behavior.
    DistanceTable() : Ks_(0) {}
    DistanceTable(size_t M, size_t Ks) : Ks_(Ks), data_(M * Ks) {}
    // Store the distance for sub-quantizer m, codeword ks.
    void SetVal(size_t m, size_t ks, float val) {
        data_[m * Ks_ + ks] = val;
    }
    // Fetch the distance for sub-quantizer m, codeword ks.
    float GetVal(size_t m, size_t ks) const {
        return data_[m * Ks_ + ks];
    }
    size_t Ks_;                 // row stride (number of codewords per sub-quantizer)
    std::vector<float> data_;   // row-major M x Ks_ values
};
// Core index class of Rii: PQ codes (flattened_codes_) plus an inverted
// file built by PQk-means (coarse_centers_ / posting_lists_).  Construct
// with codewords, AddCodes(), then Reconfigure() before IVF queries.
class RiiCpp {
public:
RiiCpp() {} // Shouldn't be default-constructed; exists only for bindings/containers
RiiCpp(const py::array_t<float> &codewords, bool verbose);
// ===== Functions that can be called from Python =====
//void SetCodewords(const py::array_t<float> &codewords); // This should be called first
void Reconfigure(int nlist, int iter);
void AddCodes(const py::array_t<unsigned char> &codes, bool update_flag);
// Python's default integer type is int64 (long long), hence target_ids is long long
std::pair<std::vector<size_t>, std::vector<float>> QueryLinear(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids) const;
std::pair<std::vector<size_t>, std::vector<float>> QueryIvf(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids,
int L) const;
void Clear();
// ===== Functions that would not be called from Python (Used inside c++) =====
void UpdatePostingLists(size_t start, size_t num);
DistanceTable DTable(const py::array_t<float> &vec) const;
float ADist(const DistanceTable &dtable, const std::vector<unsigned char> &code) const;
float ADist(const DistanceTable &dtable, const std::vector<unsigned char> &flattened_codes, size_t n) const;
std::pair<std::vector<size_t>, std::vector<float>> PairVectorToVectorPair(const std::vector<std::pair<size_t, float>> &pair_vec) const;
// Property getters: N = number of indexed vectors, NumList = number of coarse clusters
size_t GetN() const {return flattened_codes_.size() / M_;}
size_t GetNumList() const {return coarse_centers_.size();}
// Given a long (N * M) code array, pick up the n-th code (M bytes)
std::vector<unsigned char> NthCode(const std::vector<unsigned char> &long_code, size_t n) const;
// Given a long (N * M) code array, pick up the m-th element of the n-th code
unsigned char NthCodeMthElement(const std::vector<unsigned char> &long_code, std::size_t n, size_t m) const;
// Member variables
size_t M_, Ks_;  // number of sub-quantizers, codewords per sub-quantizer
bool verbose_;
std::vector<std::vector<std::vector<float>>> codewords_; // (M, Ks, Ds)
std::vector<std::vector<unsigned char>> coarse_centers_; // (NumList, M)
std::vector<unsigned char> flattened_codes_; // (N, M) PQ codes are flattened to N * M long array
std::vector<std::vector<int>> posting_lists_; // (NumList, any)
};
// Build the index skeleton from a (M, Ks, Ds) numpy array of PQ codewords,
// deep-copying the buffer into the nested-vector representation.
RiiCpp::RiiCpp(const py::array_t<float> &codewords, bool verbose)
{
    verbose_ = verbose;
    // `codewords` must have ndim=3; unchecked<3> gives a non-writable view.
    const auto &view = codewords.unchecked<3>();
    M_ = (size_t) view.shape(0);
    Ks_ = (size_t) view.shape(1);
    const size_t Ds = (size_t) view.shape(2);
    codewords_.resize(M_, std::vector<std::vector<float>>(Ks_, std::vector<float>(Ds)));
    for (ssize_t sub = 0; sub < view.shape(0); ++sub) {
        for (ssize_t cw = 0; cw < view.shape(1); ++cw) {
            for (ssize_t dim = 0; dim < view.shape(2); ++dim) {
                codewords_[sub][cw][dim] = view(sub, cw, dim);
            }
        }
    }
    if (verbose_) {
        // Report which SIMD implementation distance.h selected
        // (g_simd_architecture is a global defined there).
        std::cout << "SIMD support: " << g_simd_architecture << std::endl;
    }
}
// (Re)build the inverted file: train `nlist` coarse centers with PQk-means
// (`iter` iterations) on a sampled subset of the stored codes, then rebuild
// all posting lists.  Requires AddCodes() to have been called first
// (nlist <= N is asserted).
void RiiCpp::Reconfigure(int nlist, int iter)
{
assert(0 < nlist);
assert((size_t) nlist <= GetN());
// ===== (1) Sampling vectors for pqk-means =====
// Since clustering takes time, we use a subset of all codes for clustering:
// at most nlist * 100 vectors.
size_t len_for_clustering = std::min(GetN(), (size_t) nlist * 100);
if (verbose_) {
std::cout << "The number of vectors used for training of coarse centers: " << len_for_clustering << std::endl;
}
// Prepare a random set of integers, drawn from [0, ..., N-1], where the cardinality of the set is len_for_clustering
std::vector<size_t> ids_for_clustering(GetN()); // This can be large and might be the bottleneck of memory consumption
std::iota(ids_for_clustering.begin(), ids_for_clustering.end(), 0); // 0, 1, 2, ...
std::shuffle(ids_for_clustering.begin(), ids_for_clustering.end(), std::default_random_engine(123)); // fixed seed: sampling is deterministic
ids_for_clustering.resize(len_for_clustering);
ids_for_clustering.shrink_to_fit(); // For efficient memory usage
std::vector<unsigned char> flattened_codes_randomly_picked; // size=len_for_clustering
flattened_codes_randomly_picked.reserve(len_for_clustering * M_);
for (const auto &id : ids_for_clustering) { // Pick up vectors to construct a training set
std::vector<unsigned char> code = NthCode(flattened_codes_, id);
flattened_codes_randomly_picked.insert(flattened_codes_randomly_picked.end(),
code.begin(), code.end());
}
assert(flattened_codes_randomly_picked.size() == len_for_clustering * M_);
// ===== (2) Run pqk-means =====
if (verbose_) {std::cout << "Start to run PQk-means" << std::endl;}
pqkmeans::PQKMeans clustering_instance(codewords_, nlist, iter, verbose_);
clustering_instance.fit(flattened_codes_randomly_picked);
// ===== (3) Update coarse centers =====
coarse_centers_ = clustering_instance.GetClusterCenters();
assert(coarse_centers_.size() == (size_t) nlist);
assert(coarse_centers_[0].size() == M_);
// ===== (4) Update posting lists =====
// Rebuilt from scratch over ALL stored codes, not just the training sample.
if (verbose_) {std::cout << "Start to update posting lists" << std::endl;}
posting_lists_.clear();
posting_lists_.resize(nlist);
for (auto &posting_list : posting_lists_) {
posting_list.reserve(GetN() / nlist); // Roughly malloc: expected average list length
}
UpdatePostingLists(0, GetN());
}
void RiiCpp::AddCodes(const py::array_t<unsigned char> &codes, bool update_flag)
{
// (1) Add new input codes to flatted_codes. This imply pushes back the elements.
// After that, if update_flg=true, (2) update posting lists for the input codes.
// Note that update_flag should be true in usual cases. It should be false
// if (1) this is the first call of AddCodes (i.e., calling in add_configure()),
// of (2) you've decided to call reconfigure() manually after add()
if (update_flag && coarse_centers_.empty()) {
std::cerr << "Error. reconfigure() must be called before running add(vecs=X, update_posting_lists=True)."
<< "If this is the first addition, please call add_configure(vecs=X)" << std::endl;
throw;
}
// ===== (1) Add codes to flattened_codes =====
const auto &r = codes.unchecked<2>(); // codes must have ndim=2; with non-writeable
size_t N = (size_t) r.shape(0);
assert(M_ == (size_t) r.shape(1));
size_t N0 = GetN();
flattened_codes_.resize( (N0 + N) * M_);
for (size_t n = 0; n < N; ++n) {
for (size_t m = 0; m < M_; ++m) {
flattened_codes_[ (N0 + n) * M_ + m] = r(n, m);
}
}
if (verbose_) {
std::cout << N << " new vectors are added." << std::endl;
std::cout << "Total number of codes is " << GetN() << std::endl;
}
// ===== (2) Update posting lists =====
if (update_flag) {
if (verbose_) { std::cout << "Start to update posting lists" << std::endl; }
UpdatePostingLists(N0, N);
}
}
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::QueryLinear(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids) const
{
const auto &tids = target_ids.unchecked<1>(); // target_ids must have ndim = 1; can be non-writeable
size_t S = tids.shape(0); // The number of target_ids. It might be 0 if not specified.
assert((size_t) topk <= GetN());
// ===== (1) Create dtable =====
DistanceTable dtable = DTable(query);
// ===== (2) Run PQ linear search =====
// [todo] Can be SIMDized?
std::vector<std::pair<size_t, float>> scores;
if (S == 0) { // No target ids
size_t N = GetN();
scores.resize(N);
#pragma omp parallel for
for (size_t n = 0; n < N; ++n) {
scores[n] = {n, ADist(dtable, flattened_codes_, n)};
}
} else { // Target ids are specified
assert((size_t) topk <= S);
assert(S <= GetN());
scores.resize(S);
#pragma omp parallel for
for (size_t s = 0; s < S; ++s) {
size_t tid = static_cast<size_t>(tids(s));
scores[s] = {tid, ADist(dtable, flattened_codes_, tid)};
}
}
// ===== (3) Sort them =====
// [todo] Can be parallelized?
std::partial_sort(scores.begin(), scores.begin() + topk, scores.end(),
[](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
scores.resize(topk);
scores.shrink_to_fit();
// ===== (4) Return the result, in the form of pair<vec, vec> =====
// Note that this returns two lists, not np.array
return PairVectorToVectorPair(scores);
}
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::QueryIvf(const py::array_t<float> &query,
int topk,
const py::array_t<long long> &target_ids,
int L) const
{
const auto &tids = target_ids.unchecked<1>(); // target_ids must have ndim = 1 with non-writeable
size_t S = tids.shape(0); // The number of target_ids. It might be 0 if not specified.
assert((size_t) topk <= GetN());
assert(topk <= L && (size_t) L <= GetN());
// ===== (1) Create dtable =====
DistanceTable dtable = DTable(query);
// ===== (2) Compare to coarse centers and sort the results =====
std::vector<std::pair<size_t, float>> scores_coarse(coarse_centers_.size());
size_t nlist = GetNumList();
//#pragma omp parallel for
for (size_t no = 0; no < nlist; ++no) {
scores_coarse[no] = {no, ADist(dtable, coarse_centers_[no])};
}
// ===== (3) Partial sort the coarse results. =====
size_t w; // The number of posting lists to be considered
if (S == 0) {
w = (size_t) std::round((double) L * GetNumList() / GetN());
} else {
assert((size_t) topk <= S && S <= GetN());
w = (size_t) std::round((double) L * GetNumList() / S);
}
w += 3; // Top poslists might contain a few items, so we set w litter bit bigger for insurance
if (nlist < w) { // If w is bigger than the original nlist, let's set back nlist
w = nlist;
}
std::partial_sort(scores_coarse.begin(), scores_coarse.begin() + w, scores_coarse.end(),
[](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
// ===== (4) Traverse posting list =====
std::vector<std::pair<size_t, float>> scores;
scores.reserve(L);
int coarse_cnt = 0;
for (const auto &score_coarse : scores_coarse) {
size_t no = score_coarse.first;
coarse_cnt++;
// [todo] This loop can be parallelized
for (const auto &n : posting_lists_[no]) {
// ===== (5) If id is not included in target_ids, skip. =====
// Note that if S==0 (target is all), then evaluate all IDs
if (S != 0 && !std::binary_search(target_ids.data(), target_ids.data() + S, static_cast<long long>(n))) {
continue;
}
// ===== (6) Evaluate n =====
scores.emplace_back(n, ADist(dtable, flattened_codes_, n));
// ===== (7) If scores are collected enough =====
if (scores.size() == (size_t) L) {
goto finish;
}
}
// If w coarse centers are traversed and still L items are not found while more than topk items are found,
// we terminate the process and do the final reranking
if ( (size_t) coarse_cnt == w && scores.size() >= (unsigned long) topk) {
finish:
// ===== (8) Sort them =====
std::partial_sort(scores.begin(), scores.begin() + topk, scores.end(),
[](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
scores.resize(topk);
scores.shrink_to_fit();
// ===== (9) Return the result, in the form of pair<vec, vec> =====
// Note that this returns two lists, not np.array
return PairVectorToVectorPair(scores);
}
}
// It can be happened that vectors are not found
return std::pair<std::vector<size_t>, std::vector<float>>({}, {});
}
void RiiCpp::Clear()
{
coarse_centers_.clear();
flattened_codes_.clear();
posting_lists_.clear();
}
void RiiCpp::UpdatePostingLists(size_t start, size_t num)
{
// Update (add) identifiers to posting lists, from codes[start] to codes[start + num -1]
// This just add IDs, so be careful to call this (e.g., the same IDs will be added if you call
// this funcs twice at the same time, that would be not expected behavior)
assert(start <= GetN());
assert(start + num <= GetN());
// ===== (1) Construct a dummy pqkmeans class for computing Symmetric Distance =====
pqkmeans::PQKMeans clustering_instance(codewords_, GetNumList(), 0, true);
clustering_instance.SetClusterCenters(coarse_centers_);
// ===== (2) Update posting lists =====
std::vector<size_t> assign(num);
#pragma omp parallel for
for (size_t n = 0; n < num; ++n) {
assign[n] = clustering_instance.predict_one(NthCode(flattened_codes_, start + n));
}
for (size_t n = 0; n < num; ++n) {
posting_lists_[assign[n]].push_back(start + n);
}
}
DistanceTable RiiCpp::DTable(const py::array_t<float> &vec) const
{
const auto &v = vec.unchecked<1>();
size_t Ds = codewords_[0][0].size();
assert((size_t) v.shape(0) == M_ * Ds);
DistanceTable dtable(M_, Ks_);
for (size_t m = 0; m < M_; ++m) {
for (size_t ks = 0; ks < Ks_; ++ks) {
dtable.SetVal(m, ks, fvec_L2sqr(&(v(m * Ds)), codewords_[m][ks].data(), Ds));
}
}
return dtable;
}
float RiiCpp::ADist(const DistanceTable &dtable, const std::vector<unsigned char> &code) const
{
assert(code.size() == M_);
float dist = 0;
for (size_t m = 0; m < M_; ++m) {
unsigned char ks = code[m];
dist += dtable.GetVal(m, ks);
}
return dist;
}
float RiiCpp::ADist(const DistanceTable &dtable, const std::vector<unsigned char> &flattened_codes, size_t n) const
{
float dist = 0;
for (size_t m = 0; m < M_; ++m) {
unsigned char ks = NthCodeMthElement(flattened_codes, n, m);
dist += dtable.GetVal(m, ks);
}
return dist;
}
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::PairVectorToVectorPair(const std::vector<std::pair<size_t, float> > &pair_vec) const
{
std::pair<std::vector<size_t>, std::vector<float>> vec_pair(std::vector<size_t>(pair_vec.size()), std::vector<float>(pair_vec.size()));
for(size_t n = 0, N = pair_vec.size(); n < N; ++n) {
vec_pair.first[n] = pair_vec[n].first;
vec_pair.second[n] = pair_vec[n].second;
}
return vec_pair;
}
std::vector<unsigned char> RiiCpp::NthCode(const std::vector<unsigned char> &long_code, size_t n) const
{
return std::vector<unsigned char>(long_code.begin() + n * M_, long_code.begin() + (n + 1) * M_);
}
unsigned char RiiCpp::NthCodeMthElement(const std::vector<unsigned char> &long_code, std::size_t n, size_t m) const
{
return long_code[ n * M_ + m];
}
} // namespace rii
#endif // RII_H
|
10.norace4.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 100
int main() {
int sum = 0;
#pragma omp parallel num_threads(8)
{
#pragma omp sections reduction(+ : sum)
{
for (int i = 0; i < N; i++) {
sum += i;
}
#pragma omp section
for (int i = 0; i < N; i++) {
sum += i * i;
}
}
}
return sum % (N * N);
}
// CHECK: Region is Data Race Free.
// END
|
graph.h | #pragma once
#include "util/timer.h"
#include <sys/time.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unordered_map>
#include <vector>
#include <chrono>
#include <fstream>
#include "util/util.h"
#include "util/log/log.h"
using namespace std;
typedef int vid_t;
//typedef unsigned int eid_t;
typedef size_t eid_t;
typedef struct {
long n;
long m;
vid_t *adj;
eid_t *num_edges;
eid_t *eid;
} graph_t;
typedef struct {
long n;
long m;
vid_t *adj;
uint32_t *num_edges;
uint32_t *eid;
} cuda_graph_t;
//Define an Edge data type
struct Edge {
vid_t u;
vid_t v;
Edge() {
this->u = 0;
this->v = 0;
}
Edge(vid_t u, vid_t v) {
this->u = u;
this->v = v;
}
};
void free_graph(graph_t *g);
void getEidAndEdgeList(graph_t *g, Edge *idToEdge);
template<typename T>
struct Graph {
string dir;
uint32_t nodemax;
T edgemax;
// csr representation
T *node_off;
int *edge_dst;
vector<int> degree;
explicit Graph(char *dir_cstr);
public:
void ReadDegree();
void CheckInputGraph();
void ReadAdjacencyList();
};
template<typename T>
Graph<T>::Graph(char *dir_cstr) {
dir = string(dir_cstr);
// clear the 4 bytes
edgemax = 0;
ReadDegree();
ReadAdjacencyList();
CheckInputGraph();
}
using namespace std::chrono;
template<typename T>
void Graph<T>::ReadDegree() {
auto start = high_resolution_clock::now();
ifstream deg_file(dir + string("/b_degree.bin"), ios::binary);
int int_size = 0;
deg_file.read(reinterpret_cast<char *>(&int_size), 4);
if (int_size != sizeof(T)) {
log_warn("int_size != sizeof(T), %d, %d", int_size, sizeof(T));
}
deg_file.read(reinterpret_cast<char *>(&nodemax), sizeof(int));
deg_file.read(reinterpret_cast<char *>(&edgemax), int_size);
log_info("int size: %d, n: %s, m: %s", int_size, FormatWithCommas(nodemax).c_str(),
FormatWithCommas(edgemax).c_str());
degree.resize(static_cast<unsigned long>(nodemax));
deg_file.read(reinterpret_cast<char *>(°ree.front()), sizeof(int) * nodemax);
auto end = high_resolution_clock::now();
log_info("read degree file time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
}
template<typename T>
void Graph<T>::ReadAdjacencyList() {
auto start = high_resolution_clock::now();
ifstream adj_file(dir + string("/b_adj.bin"), ios::binary);
// csr representation
node_off = (T *) malloc(sizeof(T) * (nodemax + 1));
edge_dst = static_cast<int *>(malloc(sizeof(int) * static_cast<uint64_t>(edgemax + 16)));
string dst_v_file_name = dir + string("/b_adj.bin");
auto dst_v_fd = open(dst_v_file_name.c_str(), O_RDONLY, S_IRUSR | S_IWUSR);
int *buffer = (int *) mmap(0, static_cast<uint64_t >(edgemax) * 4u, PROT_READ, MAP_PRIVATE, dst_v_fd, 0);
// prefix sum
node_off[0] = 0;
for (auto i = 0u; i < nodemax; i++) { node_off[i + 1] = node_off[i] + degree[i]; }
auto end = high_resolution_clock::now();
log_info("malloc, and sequential-scan time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
// load dst vertices into the array
#pragma omp parallel for schedule(dynamic, 1000)
for (auto i = 0u; i < nodemax; i++) {
// copy to the high memory bandwidth mem
for (uint64_t offset = node_off[i]; offset < node_off[i + 1]; offset++) {
edge_dst[offset] = buffer[offset];
}
// inclusive
degree[i]++;
}
munmap(buffer, static_cast<uint64_t >(edgemax) * 4u);
#ifdef VERIFY_INPUT
// Verify.
#pragma omp parallel for schedule(dynamic, 1000)
for (auto u = 0u; u < nodemax; u++) {
for (size_t offset = node_off[u]; offset < node_off[u + 1]; offset++) {
auto v = edge_dst[offset];
if (BranchFreeBinarySearch(edge_dst, node_off[v], node_off[v + 1], (int) u) == node_off[v + 1]) {
log_fatal("CSR not correct...");
exit(-1);
}
}
}
log_info("CSR verify pass");
#endif
auto end2 = high_resolution_clock::now();
log_info("read adjacency list file time: %.3lf s", duration_cast<milliseconds>(end2 - end).count() / 1000.0);
}
template<typename T>
void Graph<T>::CheckInputGraph() {
auto start = high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic, 5000)
for (auto i = 0u; i < nodemax; i++) {
for (auto j = node_off[i]; j < node_off[i + 1]; j++) {
if (edge_dst[j] == static_cast<int>(i)) {
log_error("Self loop of v: %d", i);
exit(1);
}
if (j > node_off[i] && edge_dst[j] <= edge_dst[j - 1]) {
log_error("Edges not sorted in increasing id order!\nThe program may not run properly!");
exit(1);
}
}
}
auto end = high_resolution_clock::now();
log_info("check input graph file time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
}
double timer();
|
compatibility.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/compatibility.h
* @brief Compatibility layer, mostly concerned with atomic operations.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#if defined(__SUNPRO_CC) && defined(__sparc)
#include <sys/atomic.h>
#endif
#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif
#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#undef max
#undef min
#endif
#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names. Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif
namespace __gnu_parallel
{
#if defined(__ICC)
template<typename _MustBeInt = int>
int32_t __faa32(int32_t* __x, int32_t __inc)
{
asm volatile("lock xadd %0,%1"
: "=__r" (__inc), "=__m" (*__x)
: "0" (__inc)
: "memory");
return __inc;
}
#if defined(__x86_64)
template<typename _MustBeInt = int>
int64_t __faa64(int64_t* __x, int64_t __inc)
{
asm volatile("lock xadd %0,%1"
: "=__r" (__inc), "=__m" (*__x)
: "0" (__inc)
: "memory");
return __inc;
}
#endif
#endif
// atomic functions only work on integers
/** @brief Add a value to a variable, atomically.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to a 32-bit signed integer.
* @param __addend Value to add.
*/
inline int32_t
__fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
{
#if defined(__ICC) //x86 version
return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ECC) //IA-64 version
return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(__ptr),
__addend);
#elif defined(__GNUC__)
return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
volatile int32_t __before, __after;
do
{
__before = *__ptr;
__after = __before + __addend;
} while (atomic_cas_32((volatile unsigned int*)__ptr, __before,
__after) != __before);
return __before;
#else //fallback, slow
#pragma message("slow __fetch_and_add_32")
int32_t __res;
#pragma omp critical
{
__res = *__ptr;
*(__ptr) += __addend;
}
return __res;
#endif
}
/** @brief Add a value to a variable, atomically.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to a 64-bit signed integer.
* @param __addend Value to add.
*/
inline int64_t
__fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
{
#if defined(__ICC) && defined(__x86_64) //x86 version
return __faa64<int>((int64_t*)__ptr, __addend);
#elif defined(__ECC) //IA-64 version
return _InterlockedExchangeAdd64((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
_GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
return 0;
#else
return _InterlockedExchangeAdd64(__ptr, __addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__GNUC__) && defined(__i386) && \
(defined(__i686) || defined(__pentium4) || defined(__athlon) \
|| defined(__k8) || defined(__core2))
return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
volatile int64_t __before, __after;
do
{
__before = *__ptr;
__after = __before + __addend;
} while (atomic_cas_64((volatile unsigned long long*)__ptr, __before,
__after) != __before);
return __before;
#else //fallback, slow
#if defined(__GNUC__) && defined(__i386)
// XXX doesn'__t work with -march=native
//#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __fetch_and_add_64")
int64_t __res;
#pragma omp critical
{
__res = *__ptr;
*(__ptr) += __addend;
}
return __res;
#endif
}
/** @brief Add a value to a variable, atomically.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to a signed integer.
* @param __addend Value to add.
*/
template<typename _Tp>
inline _Tp
__fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
{
if (sizeof(_Tp) == sizeof(int32_t))
return
(_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend);
else if (sizeof(_Tp) == sizeof(int64_t))
return
(_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend);
else
_GLIBCXX_PARALLEL_ASSERT(false);
}
#if defined(__ICC)
template<typename _MustBeInt = int>
inline int32_t
__cas32(volatile int32_t* __ptr, int32_t __old, int32_t __nw)
{
int32_t __before;
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a"(__before)
: "q"(__nw), "__m"(*(volatile long long*)(__ptr)),
"0"(__old)
: "memory");
return __before;
}
#if defined(__x86_64)
template<typename _MustBeInt = int>
inline int64_t
__cas64(volatile int64_t *__ptr, int64_t __old, int64_t __nw)
{
int64_t __before;
__asm__ __volatile__("lock; cmpxchgq %1,%2"
: "=a"(__before)
: "q"(__nw), "__m"(*(volatile long long*)(__ptr)),
"0"(__old)
: "memory");
return __before;
}
#endif
#endif
/** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to 32-bit signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
inline bool
__compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
int32_t __replacement)
{
#if defined(__ICC) //x86 version
return _InterlockedCompareExchange((void*)__ptr, __replacement,
__comparand) == __comparand;
#elif defined(__ECC) //IA-64 version
return _InterlockedCompareExchange((void*)__ptr, __replacement,
__comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
return _InterlockedCompareExchange(
reinterpret_cast<volatile long*>(__ptr),
__replacement, __comparand)
== __comparand;
#elif defined(__GNUC__)
return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
return atomic_cas_32((volatile unsigned int*)__ptr, __comparand,
__replacement) == __comparand;
#else
#pragma message("slow __compare_and_swap_32")
bool __res = false;
#pragma omp critical
{
if (*__ptr == __comparand)
{
*__ptr = __replacement;
__res = true;
}
}
return __res;
#endif
}
/** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to 64-bit signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
inline bool
__compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
int64_t __replacement)
{
#if defined(__ICC) && defined(__x86_64) //x86 version
return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
#elif defined(__ECC) //IA-64 version
return _InterlockedCompareExchange64((void*)__ptr, __replacement,
__comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
_GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
return 0;
#else
return _InterlockedCompareExchange64(__ptr, __replacement,
__comparand) == __comparand;
#endif
#elif defined(__GNUC__) && defined(__x86_64)
return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__GNUC__) && defined(__i386) && \
(defined(__i686) || defined(__pentium4) || defined(__athlon) \
|| defined(__k8) || defined(__core2))
return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
return atomic_cas_64((volatile unsigned long long*)__ptr,
__comparand, __replacement) == __comparand;
#else
#if defined(__GNUC__) && defined(__i386)
// XXX -march=native
//#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __compare_and_swap_64")
bool __res = false;
#pragma omp critical
{
if (*__ptr == __comparand)
{
*__ptr = __replacement;
__res = true;
}
}
return __res;
#endif
}
/** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param __ptr Pointer to signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value. */
template<typename _Tp>
inline bool
__compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
{
if (sizeof(_Tp) == sizeof(int32_t))
return __compare_and_swap_32((volatile int32_t*) __ptr,
(int32_t)__comparand,
(int32_t)__replacement);
else if (sizeof(_Tp) == sizeof(int64_t))
return __compare_and_swap_64((volatile int64_t*) __ptr,
(int64_t)__comparand,
(int64_t)__replacement);
else
_GLIBCXX_PARALLEL_ASSERT(false);
}
/** @brief Yield the control to another thread, without waiting for
the end to the time slice. */
inline void
__yield()
{
#if defined (_WIN32) && !defined (__CYGWIN__)
Sleep(0);
#else
sched_yield();
#endif
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
|
GB_subassign_17.c | //------------------------------------------------------------------------------
// GB_subassign_17: C(I,J)<!M,repl> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 17: C(I,J)<!M,repl> = scalar ; using S
// M: present
// Mask_comp: true
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap
// M: not bitmap
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_17
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ; // C must not be bitmap
const int64_t Cnvec = C->nvec ;
const int64_t *GB_RESTRICT Ch = C->h ;
const int64_t *GB_RESTRICT Cp = C->p ;
const bool C_is_hyper = (Ch != NULL) ;
GB_GET_MASK ;
GB_GET_SCALAR ;
GB_GET_S ;
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 17: C(I,J)<!M,repl> = scalar ; using S
//--------------------------------------------------------------------------
// Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
// required. The sparsity of !M cannot be exploited.
// Methods 13, 15, 17, and 19 are very similar.
//--------------------------------------------------------------------------
// Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
//--------------------------------------------------------------------------
GB_SUBASSIGN_IXJ_SLICE ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// assign the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
// both S (i,j) and A (i,j) present
GB_C_S_LOOKUP ;
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_scalar ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j) and M(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
//--------------------------------------------------------------
// Get the indices at the top of each list.
//--------------------------------------------------------------
int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;
//--------------------------------------------------------------
// find the smallest index of [iS iA iM] (always iA)
//--------------------------------------------------------------
int64_t i = iA ;
//--------------------------------------------------------------
// get M(i,j)
//--------------------------------------------------------------
bool mij ;
if (i == iM)
{
// mij = (bool) M [pM]
mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
GB_NEXT (M) ;
}
else
{
// mij not present, implicitly false
ASSERT (i < iM) ;
mij = false ;
}
// complement the mask entry mij since Mask_comp is true
mij = !mij ;
//--------------------------------------------------------------
// assign the entry
//--------------------------------------------------------------
if (i == iS)
{
ASSERT (i == iA) ;
{
GB_NEXT (S) ;
}
}
else
{
ASSERT (i == iA) ;
{
// S (i,j) is not present, A (i,j) is present
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* ILU solve routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.hpp"
#include "par_ilu.h"
/*--------------------------------------------------------------------
* hypre_ILUSolve
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_ILUSolve( void *ilu_vdata,
                hypre_ParCSRMatrix *A,
                hypre_ParVector *f,
                hypre_ParVector *u )
{
   /* Outer ILU solve driver: iterates "apply one ILU sweep, then check the
    * relative residual" until tol is met or max_iter is reached.  The actual
    * sweep is dispatched on ilu_type (BJ, Schur-GMRES, NSH, RAS, RAP). */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParILUData *ilu_data = (hypre_ParILUData*) ilu_vdata;
#ifdef HYPRE_USING_CUDA
   /* pointers to cusparse data; they are non-NULL only when needed */
   cusparseMatDescr_t matL_des = hypre_ParILUDataMatLMatrixDescription(ilu_data);
   cusparseMatDescr_t matU_des = hypre_ParILUDataMatUMatrixDescription(ilu_data);
   void *ilu_solve_buffer = hypre_ParILUDataILUSolveBuffer(ilu_data);//device memory
   cusparseSolvePolicy_t ilu_solve_policy = hypre_ParILUDataILUSolvePolicy(ilu_data);
   hypre_CSRMatrix *matALU_d = hypre_ParILUDataMatAILUDevice(ilu_data);
   hypre_CSRMatrix *matBLU_d = hypre_ParILUDataMatBILUDevice(ilu_data);
   hypre_CSRMatrix *matE_d = hypre_ParILUDataMatEDevice(ilu_data);
   hypre_CSRMatrix *matF_d = hypre_ParILUDataMatFDevice(ilu_data);
   csrsv2Info_t matAL_info = hypre_ParILUDataMatALILUSolveInfo(ilu_data);
   csrsv2Info_t matAU_info = hypre_ParILUDataMatAUILUSolveInfo(ilu_data);
   csrsv2Info_t matBL_info = hypre_ParILUDataMatBLILUSolveInfo(ilu_data);
   csrsv2Info_t matBU_info = hypre_ParILUDataMatBUILUSolveInfo(ilu_data);
   csrsv2Info_t matSL_info = hypre_ParILUDataMatSLILUSolveInfo(ilu_data);
   csrsv2Info_t matSU_info = hypre_ParILUDataMatSUILUSolveInfo(ilu_data);
   hypre_ParCSRMatrix *Aperm = hypre_ParILUDataAperm(ilu_data);
#endif
   /* get matrices / factors produced by setup */
   HYPRE_Int ilu_type = hypre_ParILUDataIluType(ilu_data);
   HYPRE_Int *perm = hypre_ParILUDataPerm(ilu_data);
   HYPRE_Int *qperm = hypre_ParILUDataQPerm(ilu_data);
   hypre_ParCSRMatrix *matA = hypre_ParILUDataMatA(ilu_data);
   hypre_ParCSRMatrix *matL = hypre_ParILUDataMatL(ilu_data);
   HYPRE_Real *matD = hypre_ParILUDataMatD(ilu_data);
   hypre_ParCSRMatrix *matU = hypre_ParILUDataMatU(ilu_data);
#ifndef HYPRE_USING_CUDA
   hypre_ParCSRMatrix *matmL = hypre_ParILUDataMatLModified(ilu_data);
   HYPRE_Real *matmD = hypre_ParILUDataMatDModified(ilu_data);
   hypre_ParCSRMatrix *matmU = hypre_ParILUDataMatUModified(ilu_data);
#endif
   hypre_ParCSRMatrix *matS = hypre_ParILUDataMatS(ilu_data);
   HYPRE_Int iter, num_procs, my_id;
   hypre_ParVector *F_array = hypre_ParILUDataF(ilu_data);
   hypre_ParVector *U_array = hypre_ParILUDataU(ilu_data);
   /* get settings */
   HYPRE_Real tol = hypre_ParILUDataTol(ilu_data);
   HYPRE_Int logging = hypre_ParILUDataLogging(ilu_data);
   HYPRE_Int print_level = hypre_ParILUDataPrintLevel(ilu_data);
   HYPRE_Int max_iter = hypre_ParILUDataMaxIter(ilu_data);
   HYPRE_Real *norms = hypre_ParILUDataRelResNorms(ilu_data);
   hypre_ParVector *Ftemp = hypre_ParILUDataFTemp(ilu_data);
   hypre_ParVector *Utemp = hypre_ParILUDataUTemp(ilu_data);
   hypre_ParVector *Xtemp = hypre_ParILUDataXTemp(ilu_data);
   hypre_ParVector *Ytemp = hypre_ParILUDataYTemp(ilu_data);
   HYPRE_Real *fext = hypre_ParILUDataFExt(ilu_data);
   HYPRE_Real *uext = hypre_ParILUDataUExt(ilu_data);
   hypre_ParVector *residual = NULL; /* only valid when logging > 1 */
   HYPRE_Real alpha = -1;
   HYPRE_Real beta = 1;
   HYPRE_Real conv_factor = 0.0;
   HYPRE_Real resnorm = 1.0;
   HYPRE_Real init_resnorm = 0.0;
   HYPRE_Real rel_resnorm;
   HYPRE_Real rhs_norm = 0.0;
   HYPRE_Real old_resnorm;
   HYPRE_Real ieee_check = 0.0;
   HYPRE_Real operat_cmplxty = hypre_ParILUDataOperatorComplexity(ilu_data);
   HYPRE_Int Solve_err_flag;
#ifdef HYPRE_USING_CUDA
   HYPRE_Int test_opt;
#endif
   /* problem size */
   HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int nLU = hypre_ParILUDataNLU(ilu_data);
   HYPRE_Int *u_end = hypre_ParILUDataUEnd(ilu_data);
   /* Schur system solve */
   HYPRE_Solver schur_solver = hypre_ParILUDataSchurSolver(ilu_data);
   HYPRE_Solver schur_precond = hypre_ParILUDataSchurPrecond(ilu_data);
   hypre_ParVector *rhs = hypre_ParILUDataRhs(ilu_data);
   hypre_ParVector *x = hypre_ParILUDataX(ilu_data);
   /* begin */
   HYPRE_ANNOTATE_FUNC_BEGIN;
   if(logging > 1)
   {
      residual = hypre_ParILUDataResidual(ilu_data);
   }
   hypre_ParILUDataNumIterations(ilu_data) = 0;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /*-----------------------------------------------------------------------
    * Write the solver parameters
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && print_level > 1)
   {
      hypre_ILUWriteSolverParams(ilu_data);
   }
   /*-----------------------------------------------------------------------
    * Initialize the solver error flag
    *-----------------------------------------------------------------------*/
   Solve_err_flag = 0;
   /*-----------------------------------------------------------------------
    * write some initial info
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && print_level > 1 && tol > 0.)
   {
      hypre_printf("\n\n ILU SOLVER SOLUTION INFO:\n");
   }
   /*-----------------------------------------------------------------------
    * Compute initial residual and print
    *-----------------------------------------------------------------------*/
   if (print_level > 1 || logging > 1 || tol > 0.)
   {
      if ( logging > 1 )
      {
         hypre_ParVectorCopy(f, residual );
         if (tol > 0.0)
         {
            hypre_ParCSRMatrixMatvec(alpha, A, u, beta, residual );
         }
         resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
      }
      else
      {
         hypre_ParVectorCopy(f, Ftemp);
         if (tol > 0.0)
         {
            hypre_ParCSRMatrixMatvec(alpha, A, u, beta, Ftemp);
         }
         resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
      }
      /* Since it is does not diminish performance, attempt to return an error flag
         and notify users when they supply bad input. */
      if (resnorm != 0.)
      {
         ieee_check = resnorm/resnorm; /* INF -> NaN conversion */
      }
      if (ieee_check != ieee_check)
      {
         /* ...INFs or NaNs in input can make ieee_check a NaN.  This test
            for ieee_check self-equality works on all IEEE-compliant compilers/
            machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
            by W. Kahan, May 31, 1996.  Currently (July 2002) this paper may be
            found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
         if (print_level > 0)
         {
            hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
            hypre_printf("ERROR -- hypre_ILUSolve: INFs and/or NaNs detected in input.\n");
            hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n");
            hypre_printf("ERROR detected by Hypre ... END\n\n\n");
         }
         hypre_error(HYPRE_ERROR_GENERIC);
         HYPRE_ANNOTATE_FUNC_END;
         return hypre_error_flag;
      }
      init_resnorm = resnorm;
      rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f));
      if (rhs_norm > HYPRE_REAL_EPSILON)
      {
         rel_resnorm = init_resnorm / rhs_norm;
      }
      else
      {
         /* rhs is zero, return a zero solution.
          * NOTE(review): this zeroes the internal work vector U_array, not u
          * itself (U_array is only aliased to u further below) — confirm this
          * matches the intended contract for a zero right-hand side. */
         hypre_ParVectorSetConstantValues(U_array, 0.0);
         if(logging > 0)
         {
            rel_resnorm = 0.0;
            hypre_ParILUDataFinalRelResidualNorm(ilu_data) = rel_resnorm;
         }
         HYPRE_ANNOTATE_FUNC_END;
         return hypre_error_flag;
      }
   }
   else
   {
      rel_resnorm = 1.;
   }
   if (my_id == 0 && print_level > 1)
   {
      hypre_printf("                                            relative\n");
      hypre_printf("               residual        factor       residual\n");
      hypre_printf("               --------        ------       --------\n");
      hypre_printf("    Initial    %e                 %e\n",init_resnorm,
                   rel_resnorm);
   }
   matA = A;
   U_array = u;
   F_array = f;
   /************** Main Solver Loop - always do 1 iteration ************/
   iter = 0;
   while ((rel_resnorm >= tol || iter < 1)
          && iter < max_iter)
   {
      /* Do one solve on LUe=r */
      switch(ilu_type){
         case 0: case 1:
#ifdef HYPRE_USING_CUDA
            /* Apply GPU-accelerated LU solve */
            hypre_ILUSolveCusparseLU(matA, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy,
                                     ilu_solve_buffer, F_array, U_array, perm, n, Utemp, Ftemp);//BJ-cusparse
#else
            hypre_ILUSolveLU(matA, F_array, U_array, perm, n, matL, matD, matU, Utemp, Ftemp); //BJ
#endif
            break;
         case 10: case 11:
#ifdef HYPRE_USING_CUDA
            /* Apply GPU-accelerated LU solve */
            hypre_ILUSolveCusparseSchurGMRES(matA, F_array, U_array, perm, nLU, matS, Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end,
                                             matL_des, matU_des, matBL_info, matBU_info, matSL_info, matSU_info,
                                             matBLU_d, matE_d, matF_d, ilu_solve_policy, ilu_solve_buffer);//GMRES-cusparse
#else
            hypre_ILUSolveSchurGMRES(matA, F_array, U_array, perm, perm, nLU, matL, matD, matU, matS,
                                     Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end); //GMRES
#endif
            break;
         case 20: case 21:
            hypre_ILUSolveSchurNSH(matA, F_array, U_array, perm, nLU, matL, matD, matU, matS,
                                   Utemp, Ftemp, schur_solver, rhs, x, u_end); //MR+NSH
            break;
         case 30: case 31:
            /* BUG FIX: was (..., Utemp, Utemp, ...), aliasing the two work
             * vectors; every other branch passes (Utemp, Ftemp). */
            hypre_ILUSolveLURAS(matA, F_array, U_array, perm, matL, matD, matU, Utemp, Ftemp, fext, uext); //RAS
            break;
         case 40: case 41:
            hypre_ILUSolveSchurGMRES(matA, F_array, U_array, perm, qperm, nLU, matL, matD, matU, matS,
                                     Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end); //GMRES
            break;
         case 50:
#ifdef HYPRE_USING_CUDA
            test_opt = hypre_ParILUDataTestOption(ilu_data);
            hypre_ILUSolveRAPGMRES(matA, F_array, U_array, perm, nLU, matS, Utemp, Ftemp, Xtemp, Ytemp, schur_solver, schur_precond, rhs, x, u_end,
                                   matL_des, matU_des, matAL_info, matAU_info, matBL_info, matBU_info, matSL_info, matSU_info,
                                   Aperm, matALU_d, matBLU_d, matE_d, matF_d, ilu_solve_policy, ilu_solve_buffer, test_opt);//GMRES-RAP
#else
            hypre_ILUSolveRAPGMRESHOST(matA, F_array, U_array, perm, nLU, matL, matD, matU, matmL, matmD, matmU, Utemp, Ftemp, Xtemp, Ytemp,
                                       schur_solver, schur_precond, rhs, x, u_end);//GMRES-RAP
#endif
            break;
         default:
#ifdef HYPRE_USING_CUDA
            /* Apply GPU-accelerated LU solve */
            hypre_ILUSolveCusparseLU(matA, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy,
                                     ilu_solve_buffer, F_array, U_array, perm, n, Utemp, Ftemp);//BJ-cusparse
#else
            hypre_ILUSolveLU(matA, F_array, U_array, perm, n, matL, matD, matU, Utemp, Ftemp); //BJ
#endif
            break;
      }
      /*---------------------------------------------------------------
       * Compute residual and residual norm
       *----------------------------------------------------------------*/
      if (print_level > 1 || logging > 1 || tol > 0.)
      {
         old_resnorm = resnorm;
         if ( logging > 1 ) {
            hypre_ParVectorCopy(F_array, residual);
            hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, residual );
            resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
         }
         else {
            hypre_ParVectorCopy(F_array, Ftemp);
            hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, Ftemp);
            resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
         }
         if (old_resnorm) conv_factor = resnorm / old_resnorm;
         else conv_factor = resnorm;
         if (rhs_norm > HYPRE_REAL_EPSILON)
         {
            rel_resnorm = resnorm / rhs_norm;
         }
         else
         {
            rel_resnorm = resnorm;
         }
         /* norms is sized by setup; assumes max_iter entries — TODO confirm */
         norms[iter] = rel_resnorm;
      }
      ++iter;
      hypre_ParILUDataNumIterations(ilu_data) = iter;
      hypre_ParILUDataFinalRelResidualNorm(ilu_data) = rel_resnorm;
      if (my_id == 0 && print_level > 1)
      {
         hypre_printf("    ILUSolve %2d   %e    %f     %e \n", iter,
                      resnorm, conv_factor, rel_resnorm);
      }
   }
   /* check convergence within max_iter */
   if (iter == max_iter && tol > 0.)
   {
      Solve_err_flag = 1;
      hypre_error(HYPRE_ERROR_CONV);
   }
   /*-----------------------------------------------------------------------
    * Print closing statistics
    * Add operator and grid complexity stats
    *-----------------------------------------------------------------------*/
   if (iter > 0 && init_resnorm)
   {
      conv_factor = pow((resnorm/init_resnorm),(1.0/(HYPRE_Real) iter));
   }
   else
   {
      conv_factor = 1.;
   }
   if (print_level > 1)
   {
      /*** compute operator and grid complexity (fill factor) here ?? ***/
      if (my_id == 0)
      {
         if (Solve_err_flag == 1)
         {
            hypre_printf("\n\n==============================================");
            hypre_printf("\n NOTE: Convergence tolerance was not achieved\n");
            hypre_printf("      within the allowed %d iterations\n",max_iter);
            hypre_printf("==============================================");
         }
         hypre_printf("\n\n Average Convergence Factor = %f \n",conv_factor);
         hypre_printf("                operator = %f\n",operat_cmplxty);
      }
   }
   HYPRE_ANNOTATE_FUNC_END;
   return hypre_error_flag;
}
/* Schur Complement solve with GMRES on the Schur complement.
 * ParCSRMatrix S is already built in the ILU data structure; it is used here directly.
 * L, D and U factors only have local scope (no off-diagonal processor terms),
 * so apart from the residual calculation (which uses A), the solves with the
 * L and U factors are local.
 * S is the global Schur complement.
 * schur_solver is a GMRES solver; schur_precond is the ILU preconditioner for GMRES.
 * rhs and x are helper vectors for solving the Schur system.
 * perm is the row permutation and qperm the column permutation (they may differ,
 * e.g. for the dual-permutation ILU variants; for type 10/11 the caller passes
 * perm for both).
 * u_end[i] splits row i of U: entries in [U_diag_i[i], u_end[i]) belong to the
 * U_B (upper-left) block used in the backward solve, entries in
 * [u_end[i], U_diag_i[i+1]) couple to the Schur unknowns (F block).
 */
HYPRE_Int
hypre_ILUSolveSchurGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int *qperm,
HYPRE_Int nLU, hypre_ParCSRMatrix *L,
HYPRE_Real* D, hypre_ParCSRMatrix *U,
hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for L and U (local diagonal blocks in CSR form) */
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag);
// HYPRE_Int m = n - nLU;
/* other data objects for computation */
// hypre_Vector *f_local;
// HYPRE_Real *f_data;
hypre_Vector *rhs_local;
HYPRE_Real *rhs_data;
hypre_Vector *x_local;
HYPRE_Real *x_data;
/* begin */
beta = 1.0;
alpha = -1.0;
/* compute residual: ftemp = f - A*u */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* 1st need to solve LBi*xi = fi
 * L solve, solve xi put in u_temp upper
 * (forward substitution; L has an implicit unit diagonal, so the row starts
 * from the permuted rhs value and subtracts the strictly-lower entries)
 */
// f_local = hypre_ParVectorLocalVector(f);
// f_data = hypre_VectorData(f_local);
/* now update with L to solve */
for(i = 0 ; i < nLU ; i ++)
{
utemp_data[qperm[i]] = ftemp_data[perm[i]];
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
utemp_data[qperm[i]] -= L_diag_data[j] * utemp_data[qperm[L_diag_j[j]]];
}
}
/* 2nd need to compute g'i = gi - Ei*UBi^-1*xi
 * now put g'i into the f_temp lower
 * (rows nLU..n-1 of L hold the E-block coupling to the already-solved upper part)
 */
for(i = nLU ; i < n ; i ++)
{
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = L_diag_j[j];
ftemp_data[perm[i]] -= L_diag_data[j] * utemp_data[qperm[col]];
}
}
/* 3rd need to solve global Schur Complement Sy = g'
 * for now only solve the local system
 * solve y put in u_temp lower
 * only solve when S is not NULL
 */
if(S)
{
/*initialize solution to zero for residual equation */
hypre_ParVectorSetConstantValues(x, 0.0);
/* setup vectors for solve */
rhs_local = hypre_ParVectorLocalVector(rhs);
rhs_data = hypre_VectorData(rhs_local);
x_local = hypre_ParVectorLocalVector(x);
x_data = hypre_VectorData(x_local);
/* set rhs value (Schur unknowns are rows nLU..n-1, shifted to 0-based) */
for(i = nLU ; i < n ; i ++)
{
rhs_data[i-nLU] = ftemp_data[perm[i]];
}
/* solve the (global) Schur system with preconditioned GMRES */
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)S,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* copy value back to original */
for(i = nLU ; i < n ; i ++)
{
utemp_data[qperm[i]] = x_data[i-nLU];
}
}
/* 4th need to compute zi = xi - LBi^-1*Fi*yi
 * put zi in f_temp upper
 * only do this computation when nLU < n
 * U is unsorted, search is expensive when unnecessary
 * (the F-block entries of row i are [u_end[i], U_diag_i[i+1]))
 */
if(nLU < n)
{
for(i = 0 ; i < nLU ; i ++)
{
ftemp_data[perm[i]] = utemp_data[qperm[i]];
k1 = u_end[i] ; k2 = U_diag_i[i+1];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
ftemp_data[perm[i]] -= U_diag_data[j] * utemp_data[qperm[col]];
}
}
for(i = 0 ; i < nLU ; i ++)
{
utemp_data[qperm[i]] = ftemp_data[perm[i]];
}
}
/* 5th need to solve UBi*ui = zi */
/* put result in u_temp upper
 * (backward substitution over the U_B part [U_diag_i[i], u_end[i]);
 * D holds the reciprocal diagonal, applied as a final scaling) */
for(i = nLU-1 ; i >= 0 ; i --)
{
k1 = U_diag_i[i] ; k2 = u_end[i];
for(j = k1 ; j < k2 ; j ++)
{
col = U_diag_j[j];
utemp_data[qperm[i]] -= U_diag_data[j] * utemp_data[qperm[col]];
}
utemp_data[qperm[i]] *= D[i];
}
/* done, now everything is in u_temp; accumulate the correction: u += utemp */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
/* Newton-Schulz-Hotelling solve.
 * Here S (built during setup) stores an APPROXIMATE INVERSE of the global
 * Schur complement, applied through hypre_NSHSolve.  The L, D and U factors
 * are purely local (diagonal-block) data, so everything except the residual
 * matvec (which uses A) is processor-local.  rhs and x are scratch vectors
 * for the Schur-system stage.  D stores the reciprocal diagonal of U, as in
 * the other solves in this file.
 */
HYPRE_Int
hypre_ILUSolveSchurNSH(hypre_ParCSRMatrix *A, hypre_ParVector *f,
                       hypre_ParVector *u, HYPRE_Int *perm,
                       HYPRE_Int nLU, hypre_ParCSRMatrix *L,
                       HYPRE_Real* D, hypre_ParCSRMatrix *U,
                       hypre_ParCSRMatrix *S,
                       hypre_ParVector *ftemp, hypre_ParVector *utemp,
                       HYPRE_Solver schur_solver,
                       hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
   /* local CSR views of the L and U factors */
   hypre_CSRMatrix *Ld     = hypre_ParCSRMatrixDiag(L);
   HYPRE_Real      *Ld_a   = hypre_CSRMatrixData(Ld);
   HYPRE_Int       *Ld_i   = hypre_CSRMatrixI(Ld);
   HYPRE_Int       *Ld_j   = hypre_CSRMatrixJ(Ld);
   hypre_CSRMatrix *Ud     = hypre_ParCSRMatrixDiag(U);
   HYPRE_Real      *Ud_a   = hypre_CSRMatrixData(Ud);
   HYPRE_Int       *Ud_i   = hypre_CSRMatrixI(Ud);
   HYPRE_Int       *Ud_j   = hypre_CSRMatrixJ(Ud);
   /* raw data of the work vectors */
   HYPRE_Real      *ut     = hypre_VectorData(hypre_ParVectorLocalVector(utemp));
   HYPRE_Real      *ft     = hypre_VectorData(hypre_ParVectorLocalVector(ftemp));
   HYPRE_Real      *rhs_a;
   HYPRE_Real      *x_a;
   /* local problem size */
   HYPRE_Int        n      = hypre_CSRMatrixNumRows(Ld);
   HYPRE_Real       alpha  = -1.0;
   HYPRE_Real       beta   = 1.0;
   HYPRE_Int        row, p, pbeg, pend, c;

   /* residual: ftemp = f - A*u */
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);

   /* Step 1: forward solve L_B x_B = f_B (unit diagonal), result in the
    * upper part of utemp.  Copy first, then subtract the strictly-lower
    * entries row by row. */
   for (row = 0; row < nLU; row++)
   {
      ut[perm[row]] = ft[perm[row]];
      pbeg = Ld_i[row];
      pend = Ld_i[row + 1];
      for (p = pbeg; p < pend; p++)
      {
         ut[perm[row]] -= Ld_a[p] * ut[perm[Ld_j[p]]];
      }
   }

   /* Step 2: g' = g - E * (U_B^{-1} x_B); rows nLU..n-1 of L carry the
    * E-block coupling.  Accumulate into the lower part of ftemp. */
   for (row = nLU; row < n; row++)
   {
      pbeg = Ld_i[row];
      pend = Ld_i[row + 1];
      for (p = pbeg; p < pend; p++)
      {
         c = Ld_j[p];
         ft[perm[row]] -= Ld_a[p] * ut[perm[c]];
      }
   }

   /* Step 3: Schur stage S y = g'.  S holds an approximate inverse, so the
    * "solve" is the NSH application x = S*rhs.  Skipped when S is NULL. */
   if (S)
   {
      /* residual equation starts from a zero guess */
      hypre_ParVectorSetConstantValues(x, 0.0);
      rhs_a = hypre_VectorData(hypre_ParVectorLocalVector(rhs));
      x_a   = hypre_VectorData(hypre_ParVectorLocalVector(x));
      /* gather the Schur right-hand side (shift rows nLU..n-1 to 0-based) */
      for (row = nLU; row < n; row++)
      {
         rhs_a[row - nLU] = ft[perm[row]];
      }
      hypre_NSHSolve(schur_solver, S, rhs, x);
      /* scatter the Schur solution into the lower part of utemp */
      for (row = nLU; row < n; row++)
      {
         ut[perm[row]] = x_a[row - nLU];
      }
   }

   /* Step 4: z = x_B - F * y, staged through ftemp then copied back.
    * Row i's F-block entries live in [u_end[i], Ud_i[i+1]); only needed
    * when a Schur part exists (nLU < n). */
   if (nLU < n)
   {
      for (row = 0; row < nLU; row++)
      {
         ft[perm[row]] = ut[perm[row]];
         pbeg = u_end[row];
         pend = Ud_i[row + 1];
         for (p = pbeg; p < pend; p++)
         {
            c = Ud_j[p];
            ft[perm[row]] -= Ud_a[p] * ut[perm[c]];
         }
      }
      for (row = 0; row < nLU; row++)
      {
         ut[perm[row]] = ft[perm[row]];
      }
   }

   /* Step 5: backward solve U_B u_B = z over [Ud_i[i], u_end[i]);
    * finish each row with the stored reciprocal diagonal D[i]. */
   for (row = nLU - 1; row >= 0; row--)
   {
      pbeg = Ud_i[row];
      pend = u_end[row];
      for (p = pbeg; p < pend; p++)
      {
         c = Ud_j[p];
         ut[perm[row]] -= Ud_a[p] * ut[perm[c]];
      }
      ut[perm[row]] *= D[row];
   }

   /* accumulate the correction: u += utemp */
   hypre_ParVectorAxpy(beta, utemp, u);
   return hypre_error_flag;
}
/* Incomplete LU solve (block-Jacobi flavor).
 * The L, D and U factors are local diagonal-block data, so apart from the
 * residual matvec (which uses A) the triangular solves are processor-local.
 * D stores the reciprocal of U's diagonal; L has an implicit unit diagonal.
 * The computed correction is accumulated into u.
 */
HYPRE_Int
hypre_ILUSolveLU(hypre_ParCSRMatrix *A, hypre_ParVector *f,
                 hypre_ParVector *u, HYPRE_Int *perm,
                 HYPRE_Int nLU, hypre_ParCSRMatrix *L,
                 HYPRE_Real* D, hypre_ParCSRMatrix *U,
                 hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
   /* local CSR views of the factors */
   hypre_CSRMatrix *Ld   = hypre_ParCSRMatrixDiag(L);
   HYPRE_Real      *Ld_a = hypre_CSRMatrixData(Ld);
   HYPRE_Int       *Ld_i = hypre_CSRMatrixI(Ld);
   HYPRE_Int       *Ld_j = hypre_CSRMatrixJ(Ld);
   hypre_CSRMatrix *Ud   = hypre_ParCSRMatrixDiag(U);
   HYPRE_Real      *Ud_a = hypre_CSRMatrixData(Ud);
   HYPRE_Int       *Ud_i = hypre_CSRMatrixI(Ud);
   HYPRE_Int       *Ud_j = hypre_CSRMatrixJ(Ud);
   HYPRE_Real      *ut   = hypre_VectorData(hypre_ParVectorLocalVector(utemp));
   HYPRE_Real      *ft   = hypre_VectorData(hypre_ParVectorLocalVector(ftemp));
   HYPRE_Real       alpha = -1.0;
   HYPRE_Real       beta  = 1.0;
   HYPRE_Int        row, p, pbeg, pend;

   /* Note: zeroing utemp here is only needed when the factor dimension is
    * smaller than A's and optimized vector ops are in play (kept disabled,
    * matching the original):
    * hypre_ParVectorSetConstantValues( utemp, 0.); */

   /* residual: ftemp = f - A*u */
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);

   /* L solve (forward).  First seed utemp with the permuted rhs — this
    * accounts for L's implicit identity diagonal. */
   for (row = 0; row < nLU; row++)
   {
      ut[perm[row]] = ft[perm[row]];
   }
   /* then apply the strictly-lower (off-diagonal) entries of L */
   for (row = 0; row < nLU; row++)
   {
      pbeg = Ld_i[row];
      pend = Ld_i[row + 1];
      for (p = pbeg; p < pend; p++)
      {
         ut[perm[row]] -= Ld_a[p] * ut[perm[Ld_j[p]]];
      }
   }

   /* U solve (backward substitution) */
   for (row = nLU - 1; row >= 0; row--)
   {
      /* off-diagonal entries of U first */
      pbeg = Ud_i[row];
      pend = Ud_i[row + 1];
      for (p = pbeg; p < pend; p++)
      {
         ut[perm[row]] -= Ud_a[p] * ut[perm[Ud_j[p]]];
      }
      /* diagonal scaling; D holds the inverse of U's diagonal */
      ut[perm[row]] *= D[row];
   }

   /* accumulate the correction: u += utemp */
   hypre_ParVectorAxpy(beta, utemp, u);
   return hypre_error_flag;
}
/* Incomplete LU solve with Restricted Additive Schwarz (RAS) overlap.
 * L, D and U factors only have local scope (no off-diagonal processor terms),
 * so apart from the residual calculation (which uses A), the solves with the
 * L and U factors are local.
 * fext and uext are temporary arrays for external (overlap) data: the factor
 * rows are indexed over an extended space of n_total = n (local) + m (offd)
 * unknowns, with indices >= n referring to external entries stored in
 * uext/fext at offset (index - n).
 * NOTE(review): this assumes L/U were built during setup over the extended
 * n_total-row RAS system — confirm against the setup routine.
 */
HYPRE_Int
hypre_ILUSolveLURAS(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
hypre_ParCSRMatrix *L,
HYPRE_Real* D, hypre_ParCSRMatrix *U,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Real *fext, HYPRE_Real *uext)
{
hypre_ParCSRCommPkg *comm_pkg;
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int num_sends, begin, end;
/* local CSR views of the factors */
hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
/* n: local rows; m: number of external (offd) columns of A */
HYPRE_Int n = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A));
HYPRE_Int m = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
// HYPRE_Int buffer_size;
HYPRE_Int n_total = m + n;
HYPRE_Int idx;
HYPRE_Int jcol;
HYPRE_Int col;
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
HYPRE_Int i, j, k1, k2;
/* begin */
alpha = -1.0;
beta = 1.0;
/* prepare for communication */
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
/* setup if not yet built */
if(!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Initialize Utemp to zero.
 * This is necessary for correctness, when we use optimized
 * vector operations in the case where sizeof(L, D or U) < sizeof(A)
 */
//hypre_ParVectorSetConstantValues( utemp, 0.);
/* compute residual: ftemp = f - A*u */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* communication to get external data */
/* get total num of send */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg,0);
end = hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends);
/* copy new index into send_buf */
for(i = begin ; i < end ; i ++)
{
/* all we need is just send out data, we don't need to worry about the
 * permutation of offd part, actually we don't need to worry about
 * permutation at all
 * borrow uext as send buffer .
 */
uext[i-begin] = ftemp_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
/* main communication: exchange residual values; received external residual
 * lands in fext (uext is reused as the send buffer, then overwritten below) */
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, uext, fext);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* L solve - Forward solve over the extended index space:
 * rows [0, n) are local (stored permuted in utemp), rows [n, n_total)
 * are external (stored unpermuted in uext) */
for( i = 0 ; i < n_total ; i ++)
{
k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
if( i < n )
{
/* diag part */
utemp_data[perm[i]] = ftemp_data[perm[i]];
for(j=k1; j <k2; j++)
{
col = L_diag_j[j];
if( col < n )
{
utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[col]];
}
else
{
/* column refers to an external unknown */
jcol = col - n;
utemp_data[perm[i]] -= L_diag_data[j] * uext[jcol];
}
}
}
else
{
/* offd part */
idx = i - n;
uext[idx] = fext[idx];
for(j=k1; j <k2; j++)
{
col = L_diag_j[j];
if(col < n)
{
uext[idx] -= L_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
uext[idx] -= L_diag_data[j] * uext[jcol];
}
}
}
}
/*-------------------- U solve - Backward substitution over the same
 * extended index space, walking rows from n_total-1 down to 0 */
for( i = n_total-1; i >= 0; i-- )
{
/* first update with the remaining (off-diagonal) entries of U */
k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
if( i < n )
{
/* diag part */
for(j=k1; j <k2; j++)
{
col = U_diag_j[j];
if( col < n )
{
utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
utemp_data[perm[i]] -= U_diag_data[j] * uext[jcol];
}
}
/* diagonal scaling (contribution from D. Note: D is stored as its inverse) */
utemp_data[perm[i]] *= D[i];
}
else
{
/* 2nd part of offd */
idx = i - n;
for(j=k1; j <k2; j++)
{
col = U_diag_j[j];
if( col < n )
{
uext[idx] -= U_diag_data[j] * utemp_data[perm[col]];
}
else
{
jcol = col - n;
uext[idx] -= U_diag_data[j] * uext[jcol];
}
}
/* diagonal scaling (contribution from D. Note: D is stored as its inverse) */
uext[idx] *= D[i];
}
}
/* Update solution with the local (restricted) part only — the RAS
 * restriction: external corrections in uext are discarded */
hypre_ParVectorAxpy(beta, utemp, u);
return hypre_error_flag;
}
#ifdef HYPRE_USING_CUDA
/* Permutation function (for GPU version, can just call thrust)
 * option  0 ("00"): permute integer array        out[i]       = in[perm[i]]
 * option  1 ("01"): reverse-permute integer array out[perm[i]] = in[i]
 * option 10:        permute real array            out[i]       = in[perm[i]]
 * option 11:        reverse-permute real array    out[perm[i]] = in[i]
 * Note: the case labels were previously written as octal literals (00, 01);
 * they are now plain decimal with the same values.
 * vectori/vectoro must not alias; size is the number of entries.
 */
HYPRE_Int
hypre_ILUSeqVectorPerm(void *vectori, void *vectoro, HYPRE_Int size, HYPRE_Int *perm, HYPRE_Int option)
{
   /* make sure any outstanding device work on these buffers is finished
    * before the host touches them */
   cudaDeviceSynchronize();
   HYPRE_Int i;
   switch(option)
   {
      case 0:
      {
         /* forward permutation of an integer array */
         HYPRE_Int *ivectori = (HYPRE_Int *) vectori;
         HYPRE_Int *ivectoro = (HYPRE_Int *) vectoro;
         for(i = 0 ; i < size ; i ++)
         {
            ivectoro[i] = ivectori[perm[i]];
         }
         break;
      }
      case 1:
      {
         /* reverse permutation of an integer array */
         HYPRE_Int *ivectori = (HYPRE_Int *) vectori;
         HYPRE_Int *ivectoro = (HYPRE_Int *) vectoro;
         for(i = 0 ; i < size ; i ++)
         {
            ivectoro[perm[i]] = ivectori[i];
         }
         break;
      }
      case 10:
      {
         /* forward permutation of a real array */
         HYPRE_Real *dvectori = (HYPRE_Real *) vectori;
         HYPRE_Real *dvectoro = (HYPRE_Real *) vectoro;
         for(i = 0 ; i < size ; i ++)
         {
            dvectoro[i] = dvectori[perm[i]];
         }
         break;
      }
      case 11:
      {
         /* reverse permutation of a real array */
         HYPRE_Real *dvectori = (HYPRE_Real *) vectori;
         HYPRE_Real *dvectoro = (HYPRE_Real *) vectoro;
         for(i = 0 ; i < size ; i ++)
         {
            dvectoro[perm[i]] = dvectori[i];
         }
         break;
      }
      default:
      {
         /* invalid option: report through hypre's printf (consistent with the
          * rest of this file), raise the error flag, then assert in debug */
         hypre_printf("Error option in ILUSeqVectorPerm\n");
         hypre_error(HYPRE_ERROR_GENERIC);
         hypre_assert(1==0);
      }
   }
   return hypre_error_flag;
}
/* Incomplete LU solve (GPU)
* L, D and U factors only have local scope (no off-diagonal processor terms)
* so apart from the residual calculation (which uses A), the solves with the
* L and U factors are local.
*/
/* hypre_ILUSolveCusparseLU : one block-Jacobi ILU(0) application on the GPU.
 * Computes u <- u + Q * (U \ (L \ (P * (f - A*u)))) where the permutation
 * P/Q is encoded in perm[] and the L and U factors share the single CSR
 * matrix matLU_d (selected via matL_des/matU_des and the csrsv2 info objects).
 * f is read-only; u is updated in place; ftemp/utemp are caller-owned device
 * work vectors of local length n. Returns hypre_error_flag.
 */
HYPRE_Int
hypre_ILUSolveCusparseLU(hypre_ParCSRMatrix *A, cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
csrsv2Info_t matL_info, csrsv2Info_t matU_info, hypre_CSRMatrix *matLU_d,
cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer,
hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int n, hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
/* Only solve when we have stuffs to be solved */
if(n == 0)
{
return hypre_error_flag;
}
/* ILU data */
HYPRE_Real *LU_data = hypre_CSRMatrixData(matLU_d);
HYPRE_Int *LU_i = hypre_CSRMatrixI(matLU_d);
HYPRE_Int *LU_j = hypre_CSRMatrixJ(matLU_d);
HYPRE_Int nnz = LU_i[n];
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
HYPRE_Real alpha;
HYPRE_Real beta;
//HYPRE_Int i, j, k1, k2;
/* cuSPARSE exposes separate single/double entry points; detect which one
 * matches the configured HYPRE_Complex width (asserted below). */
HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
hypre_assert(isDoublePrecision || isSinglePrecision);
/* begin */
alpha = -1.0;
beta = 1.0;
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
/* Initialize Utemp to zero.
* This is necessary for correctness, when we use optimized
* vector operations in the case where sizeof(L, D or U) < sizeof(A)
*/
//hypre_ParVectorSetConstantValues( utemp, 0.);
/* compute residual: ftemp = 1.0*f + (-1.0)*A*u */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* apply permutation: utemp[i] = ftemp[perm[i]] */
HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);
if(isDoublePrecision)
{
/* L solve - Forward solve: ftemp = L \ utemp */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (hypre_double *) &beta, matL_des,
(hypre_double *) LU_data, LU_i, LU_j, matL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution: utemp = U \ ftemp */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (hypre_double *) &beta, matU_des,
(hypre_double *) LU_data, LU_i, LU_j, matU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (float *) &beta, matL_des,
(float *) LU_data, LU_i, LU_j, matL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, nnz, (float *) &beta, matU_des,
(float *) LU_data, LU_i, LU_j, matU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* apply reverse permutation: ftemp[perm[i]] = utemp[i] */
HYPRE_THRUST_CALL(scatter,utemp_data, utemp_data + n, perm, ftemp_data);
/* Update solution: u += ftemp (beta == 1.0) */
hypre_ParVectorAxpy(beta, ftemp, u);
return hypre_error_flag;
}
/* Schur complement solve with GMRES on the Schur complement
 * ParCSRMatrix S is already built in the ILU data structure; here we directly use S
 * L, D and U factors only have local scope (no off-diagonal processor terms)
 * so apart from the residual calculation (which uses A), the solves with the
 * L and U factors are local.
 * S is the global Schur complement
 * schur_solver is a GMRES solver
 * schur_precond is the ILU preconditioner for GMRES
 * rhs and x are helper vectors for solving the Schur system
 */
/* hypre_ILUSolveCusparseSchurGMRES : GPU ILU solve with a Schur-complement
 * correction on the lower m = n - nLU unknowns.
 * Falls back to the plain block-Jacobi solve when S is NULL. Otherwise the
 * steps (numbered in the inline comments) are: permuted residual, forward
 * solve with the B-block L factor, Schur right-hand-side assembly via matE_d,
 * GMRES on the Schur system, back-substitution through matF_d and the B-block
 * U factor, then the inverse permutation and the solution update of u.
 * rhs and x are the size-m helper vectors of the Schur system; ftemp/utemp
 * are size-n work vectors. Returns hypre_error_flag.
 */
HYPRE_Int
hypre_ILUSolveCusparseSchurGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end,
cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
csrsv2Info_t matBL_info, csrsv2Info_t matBU_info, csrsv2Info_t matSL_info, csrsv2Info_t matSU_info,
hypre_CSRMatrix *matBLU_d, hypre_CSRMatrix *matE_d, hypre_CSRMatrix *matF_d,
cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer)
{
/* If we don't have S block, just do one L solve and one U solve */
if(!S)
{
/* Just call BJ cusparse and return */
return hypre_ILUSolveCusparseLU(A, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy,
ilu_solve_buffer, f, u, perm, nLU, ftemp, utemp);
}
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for temp vector */
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
hypre_Vector *rhs_local = hypre_ParVectorLocalVector(rhs);
HYPRE_Real *rhs_data = hypre_VectorData(rhs_local);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
HYPRE_Real *x_data = hypre_VectorData(x_local);
HYPRE_Real alpha;
HYPRE_Real beta;
//HYPRE_Real gamma;
//HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int *BLU_i = NULL;
HYPRE_Int *BLU_j = NULL;
HYPRE_Real *BLU_data = NULL;
HYPRE_Int BLU_nnz = 0;
hypre_CSRMatrix *matSLU_d = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *SLU_i = hypre_CSRMatrixI(matSLU_d);
HYPRE_Int *SLU_j = hypre_CSRMatrixJ(matSLU_d);
HYPRE_Real *SLU_data = hypre_CSRMatrixData(matSLU_d);
HYPRE_Int m = hypre_CSRMatrixNumRows(matSLU_d);
HYPRE_Int n = nLU + m;
HYPRE_Int SLU_nnz = SLU_i[m];
/* non-owning views: ftemp_upper aliases the first nLU entries of ftemp,
 * utemp_lower aliases the last m entries of utemp (OwnsData = 0, so the
 * destroys at the end free only the wrappers, not the aliased data). */
hypre_Vector *ftemp_upper = hypre_SeqVectorCreate(nLU);
hypre_Vector *utemp_lower = hypre_SeqVectorCreate(m);
hypre_VectorOwnsData(ftemp_upper) = 0;
hypre_VectorOwnsData(utemp_lower) = 0;
hypre_VectorData(ftemp_upper) = ftemp_data;
hypre_VectorData(utemp_lower) = utemp_data + nLU;
hypre_SeqVectorInitialize(ftemp_upper);
hypre_SeqVectorInitialize(utemp_lower);
if( nLU > 0)
{
BLU_i = hypre_CSRMatrixI(matBLU_d);
BLU_j = hypre_CSRMatrixJ(matBLU_d);
BLU_data = hypre_CSRMatrixData(matBLU_d);
BLU_nnz = BLU_i[nLU];
}
/* begin */
beta = 1.0;
alpha = -1.0;
//gamma = 0.0;
/* pick the cuSPARSE precision matching HYPRE_Complex (asserted) */
HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
hypre_assert(isDoublePrecision || isSinglePrecision);
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
cusparseMatDescr_t descr = hypre_HandleCusparseMatDescr(hypre_handle()); /* NOTE(review): descr is never used below */
/* compute residual: ftemp = f - A*u */
hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
/* 1st need to solve LBi*xi = fi
* L solve, solve xi put in u_temp upper
*/
/* apply permutation before we can start our solve: utemp[i] = ftemp[perm[i]] */
HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);
if(nLU > 0)
{
/* This solve won't touch data in utemp, thus, gi is still in utemp_lower */
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &beta, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &beta, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* 2nd need to compute g'i = gi - Ei*UBi^{-1}*xi
* Ei*UBi^{-1} is exactly the matE_d here
* Now: LBi^{-1}f_i is in ftemp_upper
* gi' is in utemp_lower
*/
hypre_CSRMatrixMatvec(alpha, matE_d, ftemp_upper, beta, utemp_lower);
}
/* 3rd need to solve global Schur Complement M^{-1}Sy = M^{-1}g'
* for now only solve the local system
* solve y put in u_temp lower
* only solve when S is not NULL
*/
/* setup vectors for solve
* rhs = M^{-1}g'
*/
if(m > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &beta, matL_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info,
(hypre_double *) utemp_data + nLU, (hypre_double *) ftemp_data + nLU, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &beta, matU_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info,
(hypre_double *) ftemp_data + nLU, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &beta, matL_des,
(float *) SLU_data, SLU_i, SLU_j, matSL_info,
(float *) utemp_data + nLU, (float *) ftemp_data + nLU, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &beta, matU_des,
(float *) SLU_data, SLU_i, SLU_j, matSU_info,
(float *) ftemp_data + nLU, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* solve */
/* with tricky initial guess */
//hypre_Vector *tv = hypre_ParVectorLocalVector(x);
//HYPRE_Real *tz = hypre_VectorData(tv);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* 4th need to compute zi = xi - LBi^-1*yi
* put zi in f_temp upper
* only do this computation when nLU < n
* U is unsorted, search is expensive when unnecessary
*/
if(nLU > 0)
{
hypre_CSRMatrixMatvec(alpha, matF_d, x_local, beta, ftemp_upper);
/* 5th need to solve UBi*ui = zi */
/* put result in u_temp upper */
if(isDoublePrecision)
{
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &beta, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &beta, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* copy lower part solution into u_temp as well */
hypre_TMemcpy(utemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* perm back: ftemp[perm[i]] = utemp[i] */
HYPRE_THRUST_CALL(scatter,utemp_data, utemp_data + n, perm, ftemp_data);
/* done, now everything are in u_temp, update solution */
hypre_ParVectorAxpy(beta, ftemp, u);
hypre_SeqVectorDestroy(ftemp_upper);
hypre_SeqVectorDestroy(utemp_lower);
return hypre_error_flag;
}
/* Schur complement solve with GMRES on the Schur complement, RAP style
 * ParCSRMatrix S is already built in the ILU data structure; here we directly use S
 * L, D and U factors only have local scope (no off-diagonal processor terms)
 * so apart from the residual calculation (which uses A), the solves with the
 * L and U factors are local.
 * S is the global Schur complement
 * schur_solver is a GMRES solver
 * schur_precond is the ILU preconditioner for GMRES
 * rhs and x are helper vectors for solving the Schur system
 */
/* hypre_ILUSolveRAPGMRES : GPU RAP-style ILU solve.
 * One A-smoothing pass with the full factor matALU_d, followed by a
 * Schur-complement correction solved by GMRES (or by local triangular
 * solves with the S factor when S is NULL).
 * test_opt selects the storage convention of the off-diagonal blocks, as
 * marked in the case comments below: cases 1/3 store E and F directly,
 * cases 0/2 store EU^{-1} and L^{-1}F (so fewer triangular solves are
 * needed in the 0/2 path).
 */
HYPRE_Int
hypre_ILUSolveRAPGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f,
hypre_ParVector *u, HYPRE_Int *perm,
HYPRE_Int nLU, hypre_ParCSRMatrix *S,
hypre_ParVector *ftemp, hypre_ParVector *utemp, hypre_ParVector *xtemp, hypre_ParVector *ytemp,
HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end,
cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
csrsv2Info_t matAL_info, csrsv2Info_t matAU_info,
csrsv2Info_t matBL_info, csrsv2Info_t matBU_info,
csrsv2Info_t matSL_info, csrsv2Info_t matSU_info,
hypre_ParCSRMatrix *Aperm, hypre_CSRMatrix *matALU_d, hypre_CSRMatrix *matBLU_d, hypre_CSRMatrix *matE_d, hypre_CSRMatrix *matF_d,
cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer, HYPRE_Int test_opt)
{
/* data objects for communication */
// MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* data objects for temp vector */
hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
hypre_Vector *xtemp_local = hypre_ParVectorLocalVector(xtemp);
HYPRE_Real *xtemp_data = hypre_VectorData(xtemp_local);
//hypre_Vector *ytemp_local = hypre_ParVectorLocalVector(ytemp);
//HYPRE_Real *ytemp_data = hypre_VectorData(ytemp_local);
hypre_Vector *rhs_local = hypre_ParVectorLocalVector(rhs);
HYPRE_Real *rhs_data = hypre_VectorData(rhs_local);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
HYPRE_Real *x_data = hypre_VectorData(x_local);
//HYPRE_Int i, j, k1, k2, col;
/* problem size */
HYPRE_Int *ALU_i = hypre_CSRMatrixI(matALU_d);
HYPRE_Int *ALU_j = hypre_CSRMatrixJ(matALU_d);
HYPRE_Real *ALU_data = hypre_CSRMatrixData(matALU_d);
HYPRE_Int *BLU_i = hypre_CSRMatrixI(matBLU_d);
HYPRE_Int *BLU_j = hypre_CSRMatrixJ(matBLU_d);
HYPRE_Real *BLU_data = hypre_CSRMatrixData(matBLU_d);
HYPRE_Int BLU_nnz = BLU_i[nLU];
hypre_CSRMatrix *matSLU_d = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *SLU_i = hypre_CSRMatrixI(matSLU_d);
HYPRE_Int *SLU_j = hypre_CSRMatrixJ(matSLU_d);
HYPRE_Real *SLU_data = hypre_CSRMatrixData(matSLU_d);
HYPRE_Int m = hypre_CSRMatrixNumRows(matSLU_d);
HYPRE_Int n = nLU + m;
HYPRE_Int SLU_nnz = SLU_i[m];
HYPRE_Int ALU_nnz = ALU_i[n];
/* non-owning views: ftemp_upper aliases the first nLU entries of ftemp,
 * utemp_lower aliases the last m entries of utemp */
hypre_Vector *ftemp_upper = hypre_SeqVectorCreate(nLU);
hypre_Vector *utemp_lower = hypre_SeqVectorCreate(m);
hypre_VectorOwnsData(ftemp_upper) = 0;
hypre_VectorOwnsData(utemp_lower) = 0;
hypre_VectorData(ftemp_upper) = ftemp_data;
hypre_VectorData(utemp_lower) = utemp_data + nLU;
hypre_SeqVectorInitialize(ftemp_upper);
hypre_SeqVectorInitialize(utemp_lower);
/* begin */
HYPRE_Real one = 1.0;
HYPRE_Real mone = -1.0;
HYPRE_Real zero = 0.0;
/* pick the cuSPARSE precision matching HYPRE_Complex (asserted) */
HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
hypre_assert(isDoublePrecision || isSinglePrecision);
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
cusparseMatDescr_t descr = hypre_HandleCusparseMatDescr(hypre_handle()); /* NOTE(review): descr is never used below */
switch(test_opt)
{
case 1: case 3:
{
/* E and F */
/* compute residual: utemp = f - A*u */
hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, utemp);
/* apply permutation before we can start our solve
* Au=f -> (PAQ)Q'u=Pf
*/
HYPRE_THRUST_CALL(gather, perm, perm + n, utemp_data, ftemp_data);
/* A-smoothing
* x = [UA\(LA\(P*f_u))] fill to xtemp
*/
if(n > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAU_info,
(hypre_double *) utemp_data, (hypre_double *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matL_des,
(float *) ALU_data, ALU_i, ALU_j, matAL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matU_des,
(float *) ALU_data, ALU_i, ALU_j, matAU_info,
(float *) utemp_data, (float *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* residual, we should not touch xtemp for now
* r = R*(f-PAQx)
*/
hypre_ParCSRMatrixMatvec(mone, Aperm, xtemp, one, ftemp);
/* with R is complex */
/* copy partial data in */
hypre_TMemcpy( rhs_data, ftemp_data + nLU, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* solve L^{-1} */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* -U^{-1}L^{-1} */
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* -EU^{-1}L^{-1} */
hypre_CSRMatrixMatvec(mone, matE_d, ftemp_upper, one, rhs_local);
/* now solve S
*/
if(S)
{
/* if we have a schur complement */
hypre_ParVectorSetConstantValues(x, 0.0);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* u = xtemp + P*x */
/* -Fx */
hypre_CSRMatrixMatvec(mone, matF_d, x_local, zero, ftemp_upper);
/* -L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* -U{-1}L^{-1}Fx */
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( ftemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* correction to the residual */
hypre_ParVectorAxpy(one, ftemp, xtemp);
}
else
{
/* otherwise just apply triangular solves */
if(m > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info,
(hypre_double *) rhs_data, (hypre_double *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matL_des,
(float *) SLU_data, SLU_i, SLU_j, matSL_info,
(float *) rhs_data, (float *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
if(isDoublePrecision)
{
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info,
(hypre_double *) x_data, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matU_des,
(float *) SLU_data, SLU_i, SLU_j, matSU_info,
(float *) x_data, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* u = xtemp + P*x */
/* -Fx */
hypre_CSRMatrixMatvec(mone, matF_d, rhs_local, zero, ftemp_upper);
/* -L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
/* -U{-1}L^{-1}Fx */
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( ftemp_data + nLU, rhs_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_ParVectorAxpy(one, ftemp, xtemp);
}
/* perm back: ftemp[perm[i]] = xtemp[i] */
HYPRE_THRUST_CALL(scatter,xtemp_data, xtemp_data + n, perm, ftemp_data);
/* done, update solution: u += ftemp */
hypre_ParVectorAxpy(one, ftemp, u);
}
break;
case 0: case 2: default:
{
/* EU^{-1} and L^{-1}F */
/* compute residual: ftemp = f - A*u */
hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, ftemp);
/* apply permutation before we can start our solve
* Au=f -> (PAQ)Q'u=Pf
*/
HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);
/* A-smoothing
* x = [UA\(LA\(P*f_u))] fill to xtemp
*/
if(n > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) ALU_data, ALU_i, ALU_j, matAU_info,
(hypre_double *) ftemp_data, (hypre_double *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matL_des,
(float *) ALU_data, ALU_i, ALU_j, matAL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, ALU_nnz, (float *) &one, matU_des,
(float *) ALU_data, ALU_i, ALU_j, matAU_info,
(float *) ftemp_data, (float *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* residual, we should not touch xtemp for now
* r = R*(f-PAQx)
*/
hypre_ParCSRMatrixMatvec(mone, Aperm, xtemp, one, utemp);
/* with R is complex */
/* copy partial data in */
hypre_TMemcpy( rhs_data, utemp_data + nLU, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
/* solve L^{-1} */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info,
(hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matL_des,
(float *) BLU_data, BLU_i, BLU_j, matBL_info,
(float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* -EU^{-1}L^{-1} */
hypre_CSRMatrixMatvec(mone, matE_d, ftemp_upper, one, rhs_local);
/* now solve S
*/
if(S)
{
/* if we have a schur complement */
hypre_ParVectorSetConstantValues(x, 0.0);
HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
/* u = xtemp + P*x */
/* -L^{-1}Fx */
hypre_CSRMatrixMatvec(mone, matF_d, x_local, zero, ftemp_upper);
/* -U{-1}L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( utemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_ParVectorAxpy(one, utemp, xtemp);
}
else
{
/* otherwise just apply triangular solves */
if(m > 0)
{
if(isDoublePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matL_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info,
(hypre_double *) rhs_data, (hypre_double *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* L solve - Forward solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matL_des,
(float *) SLU_data, SLU_i, SLU_j, matSL_info,
(float *) rhs_data, (float *) x_data, ilu_solve_policy, ilu_solve_buffer));
}
if(isDoublePrecision)
{
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info,
(hypre_double *) x_data, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve - Backward substitution */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
m, SLU_nnz, (float *) &one, matU_des,
(float *) SLU_data, SLU_i, SLU_j, matSU_info,
(float *) x_data, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* u = xtemp + P*x */
/* -L^{-1}Fx */
hypre_CSRMatrixMatvec(mone, matF_d, rhs_local, zero, ftemp_upper);
/* -U{-1}L^{-1}Fx */
if(nLU > 0)
{
if(isDoublePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (hypre_double *) &one, matU_des,
(hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info,
(hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
else if(isSinglePrecision)
{
/* U solve */
HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
nLU, BLU_nnz, (float *) &one, matU_des,
(float *) BLU_data, BLU_i, BLU_j, matBU_info,
(float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
}
}
/* now copy data to y_lower */
hypre_TMemcpy( utemp_data + nLU, rhs_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_ParVectorAxpy(one, utemp, xtemp);
}
/* perm back: ftemp[perm[i]] = xtemp[i] */
HYPRE_THRUST_CALL(scatter,xtemp_data, xtemp_data + n, perm, ftemp_data);
/* done, update solution: u += ftemp */
hypre_ParVectorAxpy(one, ftemp, u);
}
break;
}
return hypre_error_flag;
}
#endif
/* hypre_ILUSolveRAPGMRESHOST : host (CPU) RAP-style ILU solve.
 *
 * Applies one preconditioner sweep u <- u + M^{-1}(f - A*u) built from an
 * A-smoothing step with the complete factors (L, D, U) and, when a Schur
 * complement is present (xtemp != NULL), a coarse correction on the last
 * m = n - nLU unknowns obtained from the modified factors (mL, mD, mU)
 * via a GMRES solve on the Schur system.
 *
 * Parameters:
 *   A                  - system matrix; used only for residual matvecs
 *   f, u               - right-hand side / solution; u is updated in place
 *   perm               - row/column permutation of the factorization
 *   nLU                - size of the upper (fully factored) block
 *   L, D, U            - complete LU factors (D applied multiplicatively,
 *                        i.e. it appears to store the inverted diagonal —
 *                        confirm against the setup phase)
 *   mL, mD, mU         - partial (modified) factors defining R and P
 *   ftemp, utemp, xtemp, ytemp - caller-owned work vectors; xtemp == NULL
 *                        signals "no Schur complement"
 *   schur_solver, schur_precond - GMRES solver/preconditioner for S
 *   rhs, x             - helper vectors of the Schur system (local size m)
 *   u_end              - per-row split points separating the W/Z blocks
 *                        inside mL/mU
 *
 * Returns hypre_error_flag.
 *
 * Fix vs. previous revision: the Schur-system locals (rhs_local, x_local,
 * rhs_data, x_data) were only assigned when m > 0, yet x_local was passed to
 * hypre_SeqVectorSetConstantValues unconditionally — reading an uninitialized
 * pointer (undefined behavior) on ranks with m == 0. They are now
 * NULL-initialized and the zero-fill is guarded by m > 0.
 */
HYPRE_Int
hypre_ILUSolveRAPGMRESHOST(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm,
                           HYPRE_Int nLU, hypre_ParCSRMatrix *L, HYPRE_Real *D, hypre_ParCSRMatrix *U,
                           hypre_ParCSRMatrix *mL, HYPRE_Real *mD, hypre_ParCSRMatrix *mU,
                           hypre_ParVector *ftemp, hypre_ParVector *utemp,
                           hypre_ParVector *xtemp, hypre_ParVector *ytemp,
                           HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
                           hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
   /* data objects for L and U */
   hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L);
   HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag);
   HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag);
   HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag);
   hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U);
   HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag);
   HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag);
   HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag);
   hypre_CSRMatrix *mL_diag = hypre_ParCSRMatrixDiag(mL);
   HYPRE_Real *mL_diag_data = hypre_CSRMatrixData(mL_diag);
   HYPRE_Int *mL_diag_i = hypre_CSRMatrixI(mL_diag);
   HYPRE_Int *mL_diag_j = hypre_CSRMatrixJ(mL_diag);
   hypre_CSRMatrix *mU_diag = hypre_ParCSRMatrixDiag(mU);
   HYPRE_Real *mU_diag_data = hypre_CSRMatrixData(mU_diag);
   HYPRE_Int *mU_diag_i = hypre_CSRMatrixI(mU_diag);
   HYPRE_Int *mU_diag_j = hypre_CSRMatrixJ(mU_diag);
   hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
   HYPRE_Real *utemp_data = hypre_VectorData(utemp_local);
   hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
   HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local);
   hypre_Vector *xtemp_local = NULL;
   HYPRE_Real *xtemp_data = NULL;
   hypre_Vector *ytemp_local = NULL;
   HYPRE_Real *ytemp_data = NULL;
   if(xtemp)
   {
      /* xtemp might be null when we have no Schur complement */
      xtemp_local = hypre_ParVectorLocalVector(xtemp);
      xtemp_data = hypre_VectorData(xtemp_local);
      ytemp_local = hypre_ParVectorLocalVector(ytemp);
      ytemp_data = hypre_VectorData(ytemp_local);
   }
   HYPRE_Real alpha = -1.0;
   HYPRE_Real beta = 1.0;
   HYPRE_Int i, j, k1, k2, col;
   /* problem size */
   HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag);
   HYPRE_Int m = n - nLU;
   /* Schur-system helper vectors; NULL-initialized so the m == 0 path never
    * touches indeterminate pointers (see fix note in the header comment) */
   hypre_Vector *rhs_local = NULL;
   HYPRE_Real *rhs_data = NULL;
   hypre_Vector *x_local = NULL;
   HYPRE_Real *x_data = NULL;
   if(m > 0)
   {
      /* setup vectors for solve */
      rhs_local = hypre_ParVectorLocalVector(rhs);
      rhs_data = hypre_VectorData(rhs_local);
      x_local = hypre_ParVectorLocalVector(x);
      x_data = hypre_VectorData(x_local);
   }
   /* only support RAP with partial factorized W and Z */
   /* compute residual: ftemp = f - A*u */
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
   /* A-smoothing f_temp = [UA \ LA \ (f_temp[perm])] */
   /* permuted L solve (forward substitution, unit diagonal implied by the
    * stored strictly-lower part) */
   for(i = 0 ; i < n ; i ++)
   {
      utemp_data[i] = ftemp_data[perm[i]];
      k1 = L_diag_i[i] ; k2 = L_diag_i[i+1];
      for(j = k1 ; j < k2 ; j ++)
      {
         col = L_diag_j[j];
         utemp_data[i] -= L_diag_data[j] * utemp_data[col];
      }
   }
   if(!xtemp)
   {
      /* in this case, we don't have a Schur complement */
      /* U solve (backward substitution); result goes back to ftemp through
       * perm, each row scaled by D[i] */
      for(i = n-1 ; i >= 0 ; i --)
      {
         ftemp_data[perm[i]] = utemp_data[i];
         k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = U_diag_j[j];
            ftemp_data[perm[i]] -= U_diag_data[j] * ftemp_data[perm[col]];
         }
         ftemp_data[perm[i]] *= D[i];
      }
      hypre_ParVectorAxpy(beta, ftemp, u);
      return hypre_error_flag;
   }
   /* U solve; the smoothed iterate is accumulated in xtemp */
   for(i = n-1 ; i >= 0 ; i --)
   {
      xtemp_data[perm[i]] = utemp_data[i];
      k1 = U_diag_i[i] ; k2 = U_diag_i[i+1];
      for(j = k1 ; j < k2 ; j ++)
      {
         col = U_diag_j[j];
         xtemp_data[perm[i]] -= U_diag_data[j] * xtemp_data[perm[col]];
      }
      xtemp_data[perm[i]] *= D[i];
   }
   /* coarse-grid correction */
   /* now f_temp is the result of A-smoothing
    * rhs = R*(b - Ax)
    */
   /* utemp = (ftemp - A*xtemp) */
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, xtemp, beta, ftemp, utemp);
   /* R = [-L21 L\inv, I] */
   if( m > 0)
   {
      /* first is L solve */
      for(i = 0 ; i < nLU ; i ++)
      {
         ytemp_data[i] = utemp_data[perm[i]];
         k1 = mL_diag_i[i] ; k2 = mL_diag_i[i+1];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = mL_diag_j[j];
            ytemp_data[i] -= mL_diag_data[j] * ytemp_data[col];
         }
      }
      /* apply -W * ytemp on this, and take care of the I part;
       * u_end[i] marks where the W part of row i ends inside mL */
      for(i = nLU ; i < n ; i ++)
      {
         rhs_data[i - nLU] = utemp_data[perm[i]];
         k1 = mL_diag_i[i] ; k2 = u_end[i];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = mL_diag_j[j];
            rhs_data[i - nLU] -= mL_diag_data[j] * ytemp_data[col];
         }
      }
      /* zero initial guess for the Schur solve; x_local is valid only when
       * m > 0, which is why the call sits inside this guard */
      hypre_SeqVectorSetConstantValues(x_local, 0.0);
   }
   /* now the rhs is ready; the GMRES call is deliberately NOT guarded by
    * m > 0 — NOTE(review): presumably a collective call across MPI ranks,
    * confirm before restricting it */
   HYPRE_GMRESSolve(schur_solver,(HYPRE_Matrix)schur_precond,(HYPRE_Vector)rhs,(HYPRE_Vector)x);
   if(m > 0)
   {
      /* after solve, update x = x + Pv
       * that is, xtemp = xtemp + P*x
       */
      /* first compute P*x
       * P = [ -U\inv U_12 ]
       *     [ I           ]
       */
      /* matvec with the U_12 part of mU (columns beyond u_end[i]) */
      for(i = 0 ; i < nLU ; i ++)
      {
         ytemp_data[i] = 0.0;
         k1 = u_end[i] ; k2 = mU_diag_i[i+1];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = mU_diag_j[j];
            ytemp_data[i] -= mU_diag_data[j] * x_data[col-nLU];
         }
      }
      /* U solve (backward substitution) over the upper block */
      for(i = nLU-1 ; i >= 0 ; i --)
      {
         ftemp_data[perm[i]] = ytemp_data[i];
         k1 = mU_diag_i[i] ; k2 = u_end[i];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = mU_diag_j[j];
            ftemp_data[perm[i]] -= mU_diag_data[j] * ftemp_data[perm[col]];
         }
         ftemp_data[perm[i]] *= mD[i];
      }
      /* update with I: lower part of P*x is x itself */
      for(i = nLU ; i < n ; i ++)
      {
         ftemp_data[perm[i]] = x_data[i-nLU];
      }
      hypre_ParVectorAxpy(beta, ftemp, u);
   }
   /* add the A-smoothed iterate */
   hypre_ParVectorAxpy(beta, xtemp, u);
   return hypre_error_flag;
}
/* solve functions for NSH */
/*--------------------------------------------------------------------
* hypre_NSHSolve
*--------------------------------------------------------------------*/
/* Iteratively solve A*u = f using the Newton-Schulz-Hotelling approximate
 * inverse M stored in nsh_vdata: each sweep applies u += M*(f - A*u) until
 * the relative residual drops below tol or max_iter sweeps are reached.
 * Returns hypre_error_flag; sets HYPRE_ERROR_CONV when tol is not met. */
HYPRE_Int
hypre_NSHSolve( void *nsh_vdata,
                hypre_ParCSRMatrix *A,
                hypre_ParVector *f,
                hypre_ParVector *u )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   // HYPRE_Int i;
   hypre_ParNSHData *nsh_data = (hypre_ParNSHData*) nsh_vdata;
   /* get matrices */
   hypre_ParCSRMatrix *matA = hypre_ParNSHDataMatA(nsh_data);
   hypre_ParCSRMatrix *matM = hypre_ParNSHDataMatM(nsh_data); /* approximate inverse */
   HYPRE_Int iter, num_procs, my_id;
   hypre_ParVector *F_array = hypre_ParNSHDataF(nsh_data);
   hypre_ParVector *U_array = hypre_ParNSHDataU(nsh_data);
   /* get settings */
   HYPRE_Real tol = hypre_ParNSHDataTol(nsh_data);
   HYPRE_Int logging = hypre_ParNSHDataLogging(nsh_data);
   HYPRE_Int print_level = hypre_ParNSHDataPrintLevel(nsh_data);
   HYPRE_Int max_iter = hypre_ParNSHDataMaxIter(nsh_data);
   HYPRE_Real *norms = hypre_ParNSHDataRelResNorms(nsh_data);
   hypre_ParVector *Ftemp = hypre_ParNSHDataFTemp(nsh_data);
   hypre_ParVector *Utemp = hypre_ParNSHDataUTemp(nsh_data);
   hypre_ParVector *residual;           /* only assigned/used when logging > 1 */
   HYPRE_Real alpha = -1.0;             /* matvec scaling: r = -A*u + f */
   HYPRE_Real beta = 1.0;
   HYPRE_Real conv_factor = 0.0;
   HYPRE_Real resnorm = 1.0;
   HYPRE_Real init_resnorm = 0.0;
   HYPRE_Real rel_resnorm;
   HYPRE_Real rhs_norm = 0.0;
   HYPRE_Real old_resnorm;
   HYPRE_Real ieee_check = 0.;
   HYPRE_Real operat_cmplxty = hypre_ParNSHDataOperatorComplexity(nsh_data);
   HYPRE_Int Solve_err_flag;
   /* problem size */
   // HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   /* begin */
   if(logging > 1)
   {
      residual = hypre_ParNSHDataResidual(nsh_data);
   }
   hypre_ParNSHDataNumIterations(nsh_data) = 0;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /*-----------------------------------------------------------------------
    * Write the solver parameters
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && print_level > 1)
   {
      hypre_NSHWriteSolverParams(nsh_data);
   }
   /*-----------------------------------------------------------------------
    * Initialize the solver error flag
    *-----------------------------------------------------------------------*/
   Solve_err_flag = 0;
   /*-----------------------------------------------------------------------
    * write some initial info
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && print_level > 1 && tol > 0.)
   {
      hypre_printf("\n\n Newton–Schulz–Hotelling SOLVER SOLUTION INFO:\n");
   }
   /*-----------------------------------------------------------------------
    * Compute initial residual and print
    *-----------------------------------------------------------------------*/
   if (print_level > 1 || logging > 1 || tol > 0.)
   {
      if ( logging > 1 )
      {
         hypre_ParVectorCopy(f, residual );
         if (tol > 0.0)
         {
            hypre_ParCSRMatrixMatvec(alpha, A, u, beta, residual );
         }
         resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
      }
      else
      {
         /* no residual logging requested: reuse the Ftemp work vector */
         hypre_ParVectorCopy(f, Ftemp);
         if (tol > 0.0)
         {
            hypre_ParCSRMatrixMatvec(alpha, A, u, beta, Ftemp);
         }
         resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
      }
      /* Since it does not diminish performance, attempt to return an error flag
         and notify users when they supply bad input. */
      if (resnorm != 0.)
      {
         ieee_check = resnorm/resnorm; /* INF -> NaN conversion */
      }
      if (ieee_check != ieee_check)
      {
         /* ...INFs or NaNs in input can make ieee_check a NaN.  This test
            for ieee_check self-equality works on all IEEE-compliant compilers/
            machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
            by W. Kahan, May 31, 1996.  Currently (July 2002) this paper may be
            found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
         if (print_level > 0)
         {
            hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
            hypre_printf("ERROR -- hypre_NSHSolve: INFs and/or NaNs detected in input.\n");
            hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n");
            hypre_printf("ERROR detected by Hypre ... END\n\n\n");
         }
         hypre_error(HYPRE_ERROR_GENERIC);
         return hypre_error_flag;
      }
      init_resnorm = resnorm;
      rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f));
      if (rhs_norm > HYPRE_REAL_EPSILON)
      {
         rel_resnorm = init_resnorm / rhs_norm;
      }
      else
      {
         /* rhs is zero, return a zero solution */
         hypre_ParVectorSetConstantValues(U_array, 0.0);
         if(logging > 0)
         {
            rel_resnorm = 0.0;
            hypre_ParNSHDataFinalRelResidualNorm(nsh_data) = rel_resnorm;
         }
         return hypre_error_flag;
      }
   }
   else
   {
      rel_resnorm = 1.;
   }
   if (my_id == 0 && print_level > 1)
   {
      hypre_printf(" relative\n");
      hypre_printf(" residual factor residual\n");
      hypre_printf(" -------- ------ --------\n");
      hypre_printf(" Initial %e %e\n",init_resnorm,
                   rel_resnorm);
   }
   matA = A;
   U_array = u;
   F_array = f;
   /************** Main Solver Loop - always do 1 iteration ************/
   iter = 0;
   while ((rel_resnorm >= tol || iter < 1)
          && iter < max_iter)
   {
      /* Do one solve on e = Mr */
      hypre_NSHSolveInverse(matA, f, u, matM, Utemp, Ftemp);
      /*---------------------------------------------------------------
       * Compute residual and residual norm
       *----------------------------------------------------------------*/
      if (print_level > 1 || logging > 1 || tol > 0.)
      {
         old_resnorm = resnorm;
         if ( logging > 1 ) {
            hypre_ParVectorCopy(F_array, residual);
            hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, residual );
            resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
         }
         else {
            hypre_ParVectorCopy(F_array, Ftemp);
            hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, Ftemp);
            resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
         }
         /* per-sweep convergence factor ||r_k|| / ||r_{k-1}|| */
         if (old_resnorm) conv_factor = resnorm / old_resnorm;
         else conv_factor = resnorm;
         if (rhs_norm > HYPRE_REAL_EPSILON)
         {
            rel_resnorm = resnorm / rhs_norm;
         }
         else
         {
            rel_resnorm = resnorm;
         }
         norms[iter] = rel_resnorm;
      }
      ++iter;
      hypre_ParNSHDataNumIterations(nsh_data) = iter;
      hypre_ParNSHDataFinalRelResidualNorm(nsh_data) = rel_resnorm;
      if (my_id == 0 && print_level > 1)
      {
         hypre_printf(" NSHSolve %2d %e %f %e \n", iter,
                      resnorm, conv_factor, rel_resnorm);
      }
   }
   /* check convergence within max_iter */
   if (iter == max_iter && tol > 0.)
   {
      Solve_err_flag = 1;
      hypre_error(HYPRE_ERROR_CONV);
   }
   /*-----------------------------------------------------------------------
    * Print closing statistics
    * Add operator and grid complexity stats
    *-----------------------------------------------------------------------*/
   if (iter > 0 && init_resnorm)
   {
      /* geometric-mean convergence factor over all sweeps */
      conv_factor = pow((resnorm/init_resnorm),(1.0/(HYPRE_Real) iter));
   }
   else
   {
      conv_factor = 1.;
   }
   if (print_level > 1)
   {
      /*** compute operator and grid complexity (fill factor) here ?? ***/
      if (my_id == 0)
      {
         if (Solve_err_flag == 1)
         {
            hypre_printf("\n\n==============================================");
            hypre_printf("\n NOTE: Convergence tolerance was not achieved\n");
            hypre_printf(" within the allowed %d iterations\n",max_iter);
            hypre_printf("==============================================");
         }
         hypre_printf("\n\n Average Convergence Factor = %f \n",conv_factor);
         hypre_printf(" operator = %f\n",operat_cmplxty);
      }
   }
   return hypre_error_flag;
}
/* NSH solve
* Simply a matvec on residual with approximate inverse
* A: original matrix
* f: rhs
* u: solution
* M: approximate inverse
* ftemp, utemp: working vectors
*/
/* One sweep of the NSH iteration: u <- u + M*(f - A*u).
 * A is the system matrix, M its approximate inverse; ftemp/utemp are
 * caller-provided work vectors that are overwritten. */
HYPRE_Int
hypre_NSHSolveInverse(hypre_ParCSRMatrix *A, hypre_ParVector *f,
                      hypre_ParVector *u, hypre_ParCSRMatrix *M,
                      hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
   const HYPRE_Real mone = -1.0;
   const HYPRE_Real one  = 1.0;

   /* residual: ftemp = f - A*u */
   hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, ftemp);
   /* correction: utemp = M*ftemp */
   hypre_ParCSRMatrixMatvec(1.0, M, ftemp, 0.0, utemp);
   /* update: u = u + utemp */
   hypre_ParVectorAxpy(one, utemp, u);
   return hypre_error_flag;
}
|
tsp_hh06.c | /*
Description:
This program executes my implementation of the "Heinritz Hsiao" algorithm to solve the "Travelling Salesman Problem"
Next city in path is either the closest or second closest one, depending on the value of <PICK_CLOSEST_CITY_POSSIBILITY>
Abides by Lab 3 Exercise 5 requirements
Author:
Georgios Evangelou (1046900)
Year: 5
Parallel Programming in Machine Learning Problems
Electrical and Computer Engineering Department, University of Patras
System Specifications:
CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
GPU: Nvidia GTX 1050 (dual-fan, overclocked)
RAM: 8GB (dual-channel, @2666 MHz)
Version Notes:
Compiles/Runs/Debugs with: gcc tsp_hh06.c -o tsp_hh06 -lm -O3 -pg -fopenmp && time ./tsp_hh06 && gprof ./tsp_hh06
Executes the algorithm for 10.000 cities, spanning in an area of 1.000x1.000 km and produces correct results
Inherits all settings of versions tsp_hh04 and tsp_hh05, unless stated otherwise
Function IsInPath() is substituted by boolean array
Needs a little more time than tsp_hh05, because of the parallelism overhead, but produces slightly better results
Results when: PICK_CLOSEST_CITY_POSSIBILITY = 1.00 ===> Minimum total path distance: 89515.94
PICK_CLOSEST_CITY_POSSIBILITY = 0.95 ===> Minimum total path distance: 90720.28
PICK_CLOSEST_CITY_POSSIBILITY = 0.90 ===> Minimum total path distance: 94532.01
PICK_CLOSEST_CITY_POSSIBILITY = 0.85 ===> Minimum total path distance: 97698.78
PICK_CLOSEST_CITY_POSSIBILITY = 0.80 ===> Minimum total path distance: 101386.71
PICK_CLOSEST_CITY_POSSIBILITY = 0.75 ===> Minimum total path distance: 103783.51
Needs: ~ 0.9 seconds to calculate an optimal path using 12 threads and all optimizations listed below
*/
// ****************************************************************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
#pragma GCC target("avx") //Enable AVX
// ****************************************************************************************************************
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "omp.h"
#include "stdbool.h"
// ****************************************************************************************************************
#define N 10000
#define Nx 1000
#define Ny 1000
#define nonExist -999999
#define PICK_CLOSEST_CITY_POSSIBILITY 0.90
#define THREADS 12
// ****************************************************************************************************************
float CitiesX[N];
float CitiesY[N];
int ThreadsPath[THREADS][N+1];
double CalculatedDistances[N][N];
// ****************************************************************************************************************
// Initializes the cities' positions
// ****************************************************************************************************************
// Assigns every city a uniformly random position inside the Nx x Ny area.
// rand() is consumed in X,Y order per city so the layout is reproducible
// for a fixed srand() seed.
void SetCities() {
    printf("Now initializing the positions of the cities...\n");
    int city = 0;
    while (city < N) {
        CitiesX[city] = Nx * (float) rand() / RAND_MAX;
        CitiesY[city] = Ny * (float) rand() / RAND_MAX;
        city++;
    }
}
// ****************************************************************************************************************
// Prints the cities' positions
// ****************************************************************************************************************
// Dumps every city index and its (X, Y) coordinates to stdout.
void PrintCities() {
    printf("> The cities are:\n");
    int city = 0;
    while (city < N) {
        printf(">> City: %6d X:%5.2f Y:%5.2f\n", city, CitiesX[city], CitiesY[city] );
        city++;
    }
    printf("\n");
}
// ****************************************************************************************************************
// Prints the travelling path
// ****************************************************************************************************************
// Prints all N+1 entries of a tour (the last entry repeats the start city).
void PrintPath_2(int Path[]) {
    printf("> The path is:\n");
    int idx = 0;
    while (idx <= N) {
        printf(">> %d ", Path[idx]);
        idx++;
    }
    printf("\n");
}
// ****************************************************************************************************************
// Visually maps the cities' positions
// ****************************************************************************************************************
// Renders an ASCII grid of the city layout: each cell holds the id of the
// single city quantized into it, -1 when several cities collide, or the
// nonExist sentinel when empty.
// Fixes: the (Ny+1)x(Nx+1) int map (~4 MB with the default sizes) was a local
// array and could overflow the stack, so it is now heap-allocated; a
// meaningless (float) cast on an int store was removed.
void MapCities() {
    int (*Map)[Nx+1] = malloc(sizeof(int[Ny+1][Nx+1]));
    printf("Now creating a visual map of the cities...\n");
    if (Map == NULL) {
        printf("Error: could not allocate memory for the cities' map.\n");
        return;
    }
    for (int i=0; i<Nx+1; i++)
        for (int j=0; j<Ny+1; j++)
            Map[j][i] = nonExist;
    //printf("Quantized coordinates are:\n");
    for (int c=0; c<N; c++) {
        int x = (int) CitiesX[c] ;
        int y = (int) CitiesY[c] ;
        //printf(" City:%d y=%d and x=%d\n",c,y,x);
        if (Map[y][x] == nonExist) Map[y][x] = c;
        else Map[y][x] = -1;      // more than one city in this cell
    }
    printf("This is the cities' map:\n");
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    for (int y=0; y<Ny+1; y++){
        for (int x=0; x<Nx+1; x++)
            printf("%8d ", Map[y][x]);
        printf("\n");
    }
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("\n");
    free(Map);
}
// ****************************************************************************************************************
// Finds Euclidean distance between two cities
// ****************************************************************************************************************
// Euclidean distance between cities A and B.
// The deltas and products deliberately stay in float (matching the global
// coordinate arrays) so results are bit-identical to other call sites.
double Distance(int A, int B) {
    float dx = CitiesX[A] - CitiesX[B];
    float dy = CitiesY[A] - CitiesY[B];
    return (double) sqrt( dx*dx + dy*dy );
}
// ****************************************************************************************************************
// Finds the total Euclidean distance of a given path
// ****************************************************************************************************************
// Total Euclidean length of a tour stored in Path[0..N], plus the closing
// leg Path[N] -> Path[0] (zero when Path[N] already repeats the start city).
double PathDistance_2(int Path[]) {
    double total = 0.0;
    for (int leg = 1; leg <= N; leg++) {
        total += Distance(Path[leg-1], Path[leg]);
    }
    total += Distance(Path[N], Path[0]);
    return total;
}
// ****************************************************************************************************************
// Finds all Euclidean distances between all pairs of cities
// ****************************************************************************************************************
// Fills the symmetric CalculatedDistances matrix for all city pairs.
// The diagonal is never written and stays 0.0 (globals are zero-initialized).
// Fix: the "\r" progress line carries no newline, so stdout's line buffering
// never showed it; fflush(stdout) forces it to display.
void CalculateAllDistances() {
    printf("Now calculating distances between all pairs of cities...\n");
    for (int i=0; i<N; i++) {
        printf("\r> Progress: %.2f%%", 100*(i+1)/((float)N));
        fflush(stdout);
        for (int j=i+1; j<N; j++) {
            double temp = Distance(i, j);
            CalculatedDistances[i][j] = temp;   // store both triangles once
            CalculatedDistances[j][i] = temp;
        }
    }
    printf(" ===> Completed.\n");
}
// ****************************************************************************************************************
// Finds the travelling path by visiting the closest or second closest non-visited city each time
// ****************************************************************************************************************
// Builds a greedy tour per thread: from the current city, move to the nearest
// unvisited city with probability PICK_CLOSEST_CITY_POSSIBILITY, otherwise to
// the second nearest. Returns the tour length; the tour itself is stored in
// ThreadsPath[thread].
// Fixes: (1) the start city was marked NOT visited, so the zero diagonal of
// CalculatedDistances made city 0 its own "closest" neighbor — the tour then
// duplicated city 0 and skipped one real city; it is now marked visited.
// (2) when only one unvisited city remains, closest_city_2 is -1; picking it
// indexed out of bounds and added INFINITY — we now fall back to the closest
// city. (3) stray double semicolon removed.
double FindShortestStepPath_2() {
    #pragma omp master
    {
        printf("Now finding the shortest / second shortest step path...\n");
        printf("> Threads running independently in parallel: %d\n", omp_get_num_threads());
    }
    double totDist = 0.0;
    int visited_cities = 1, current_city = 0, thread = omp_get_thread_num();
    bool CityIsVisited[N]; for (int i=0; i<N; i++) CityIsVisited[i] = false;
    ThreadsPath[thread][0] = current_city; ThreadsPath[thread][N] = current_city;
    CityIsVisited[current_city] = true;   // FIX: start city counts as visited
    do {
        #pragma omp master
        {
            printf("\r> Progress: %.2f%%", 100*(visited_cities)/((float)N));
            fflush(stdout);               // "\r" line has no newline; force display
        }
        double dist = 0, min_dist_1 = INFINITY, min_dist_2 = INFINITY;
        int closest_city_1 = -1, closest_city_2 = -1;
        // Scan all unvisited cities, tracking the two smallest distances
        for (int i=0; i<N; i++) {
            if (CityIsVisited[i] == true) continue;
            dist = CalculatedDistances[current_city][i];
            if (min_dist_1 > dist) {
                min_dist_2 = min_dist_1; closest_city_2 = closest_city_1;
                min_dist_1 = dist; closest_city_1 = i;
            } else if (min_dist_2 > dist) {
                min_dist_2 = dist; closest_city_2 = i;
            }
        }
        // Per-step pseudo-random draw; seed mixes step, thread and wall time
        unsigned seed = 11*visited_cities + 83*thread + 11*omp_get_wtime() + current_city;
        float random_number = ((float)rand_r(&seed)) / ((float)RAND_MAX) ;
        // FIX: fall back to the closest city when no second-closest exists
        int pick_second = (random_number >= PICK_CLOSEST_CITY_POSSIBILITY) && (closest_city_2 >= 0);
        int next_city = pick_second ? closest_city_2 : closest_city_1;
        ThreadsPath[thread][visited_cities++] = next_city;
        CityIsVisited[next_city] = true;
        current_city = next_city;
        totDist += pick_second ? min_dist_2 : min_dist_1;
    } while (visited_cities<N);
    totDist += CalculatedDistances[ThreadsPath[thread][N-1]][0];   // close the tour
    #pragma omp barrier
    #pragma omp single
    printf("\r> Progress: 100.00%% ===> Completed.\n");
    #pragma omp barrier
    //printf(">> I am thread #(%2d) and my total path distance is: %lf.02\n", thread, totDist);
    return totDist;
}
// ****************************************************************************************************************
// The main program
// ****************************************************************************************************************
// Entry point: generate a fixed random city layout, precompute all pairwise
// distances, then let THREADS threads each build an independent greedy tour
// and keep the shortest one via an OpenMP min-reduction.
int main( int argc, const char* argv[] ) {
    printf("------------------------------------------------------------------------------\n");
    printf("This program searches for the optimal traveling distance between %d cities,\n", N);
    printf("spanning in an area of X=(0,%d) and Y=(0,%d)\n", Nx, Ny);
    printf("------------------------------------------------------------------------------\n");

    srand(1046900);                 // fixed seed => reproducible city layout
    SetCities();
    CalculateAllDistances();

    double best_distance = INFINITY;
    #pragma omp parallel reduction(min:best_distance) num_threads(THREADS)
    {
        best_distance = FindShortestStepPath_2();
    }

    printf("\n");
    printf("Minimum total path distance found is: %.2lf\n", best_distance);
    return 0;
}
|
geli_fmt_plug.c | /*
* JtR format to crack password protected FreeBSD GELI volumes.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_geli;
#elif FMT_REGISTERS_H
john_register_one(&fmt_geli);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "hmac_sha.h"
#include "aes.h"
#include "pbkdf2_hmac_sha512.h"
#include "jumbo.h"
#include "memdbg.h"
#include "geli_common.h"
#define FORMAT_LABEL "geli"
#define FORMAT_NAME "FreeBSD GELI"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static custom_salt *cur_salt;
/* Format init hook: sizes the key/result buffers for this run.
 * With OpenMP, min_keys_per_crypt is scaled by the raw thread count while
 * max_keys_per_crypt is additionally scaled by OMP_SCALE — the ordering of
 * the two multiplications below is deliberate. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* one candidate password slot and one cracked flag per key */
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}
/* Format teardown hook: release the per-run buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/* Select the salt (GELI metadata) that subsequent crypt_all() calls use. */
static void set_salt(void *salt)
{
	cur_salt = (custom_salt *)salt;
}
static void geli_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the candidate passphrase stored in slot `index` (for reporting). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Derive a GELI user key from every queued candidate and test it against the
 * current salt's metadata, recording hits in cracked[].
 * Pipeline per candidate: PBKDF2-HMAC-SHA512(passphrase, md_salt) ->
 * HMAC-SHA512 with empty message -> geli_decrypt_verify().
 * NOTE(review): the for-statement is inside the #ifdef, so a non-OpenMP build
 * executes the block exactly once with index == 0.  This appears intentional:
 * init() only scales max_keys_per_crypt under _OPENMP, so without OpenMP
 * count never exceeds MAX_KEYS_PER_CRYPT — confirm before refactoring. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	memset(cracked, 0, sizeof(cracked[0])*cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][G_ELI_USERKEYLEN];
		unsigned char key[MAX_KEYS_PER_CRYPT][G_ELI_USERKEYLEN];
		int i;
#ifdef SIMD_COEF_64
		/* SIMD path: run a whole group of PBKDF2 computations in parallel */
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			pout[i] = master[i];
		}
		pbkdf2_sha512_sse((const unsigned char**)pin, lens, cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, pout, G_ELI_USERKEYLEN, 0);
#else
		/* scalar path: one PBKDF2 per candidate */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			pbkdf2_sha512((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->md_salt, G_ELI_SALTLEN, cur_salt->md_iterations, master[i], G_ELI_USERKEYLEN, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			/* user key = HMAC-SHA512(master key, empty message) */
			JTR_hmac_sha512((const unsigned char*)"", 0, master[i], G_ELI_USERKEYLEN, key[i], G_ELI_USERKEYLEN);
			cracked[index+i] = geli_decrypt_verify(cur_salt, key[i]);
		}
	}
	return count;
}
/* Return nonzero if any candidate in this batch unlocked the volume. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (cracked[i])
			return 1;
		i++;
	}
	return 0;
}
/* Did the candidate at `index` crack the volume? (flag set by crypt_all) */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full verification already happened in crypt_all(), so always confirm. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* John the Ripper format descriptor: static parameters followed by the
 * method table wired to the functions above and the shared geli_common_*
 * helpers. */
struct fmt_main fmt_geli = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                      /* minimum plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",  /* tunable-cost name */
		},
		{ FORMAT_TAG },
		geli_tests              /* self-test vectors */
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		geli_common_valid,
		fmt_default_split,
		fmt_default_binary,
		geli_common_get_salt,
		{
			geli_common_iteration_count,  /* tunable-cost extractor */
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		geli_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multi-device communication
*/
class Comm {
 public:
  Comm() {
    // All CPU-side staging happens in page-locked (pinned) memory.
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
            where the row_ids are expected to be unique and sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return the pinned context used for CPU staging buffers
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   * perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  Context pinned_ctx_;                      // CPUPinned(0), set in the ctor
  std::shared_ptr<GradientCompression> gc_; // optional; set via SetGradientCompression
};
/**
 * \brief an implementation of Comm that first copies data to CPU memory, and then
 * reduces there
*/
class CommCPU : public Comm {
public:
  CommCPU() {
    // Thread count for the CPU reduction path (presumably consumed by
    // ReduceSumCPUImpl, which is defined elsewhere — confirm there).
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    // Size threshold distinguishing "big" arrays (default 1e6 elements).
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }
  // Register `key` by creating its dense merge buffer on the pinned CPU
  // context. Note the parameter is named `type` here vs. `dtype` in the base
  // class declaration; semantics are the same.
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    // Delayed allocation - the dense merged buffer might not be used at all if push()
    // only sees sparse arrays
    bool delay_alloc = true;
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
  }
  // Sum all arrays in `src` into the per-key merge buffer on the pinned CPU
  // context and return it. Dense inputs go through ReduceSumCPU; sparse
  // inputs through ElementwiseSum (or a serial path when is_serial_push_ is
  // set). The actual compute is pushed asynchronously onto the engine with
  // proper read/write variable dependencies.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    auto& buf = merge_buf_[key];
    const auto stype = src[0].storage_type();
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (stype == kDefaultStorage) {
        return src[0];
      } else {
        // With 'local' kvstore, we could store the weight on CPU while compute
        // the gradient on GPU when the weight is extremely large.
        // To avoid copying the weight to the same context of the gradient,
        // we always copy the gradient to merged buf.
        NDArray& merged = buf.merged_buf(stype);
        CopyFromTo(src[0], &merged, priority);
        return merged;
      }
    }
    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf_merged, priority);
      reduce[0] = buf_merged;
      // lazily allocate one pinned staging buffer per additional source
      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size()-1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate copy buffer
          buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << stype << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
        reduce[i] = buf.copy_buf[i-1];
        const_vars[i-1] = reduce[i].var();
      }
      // sum in-place into reduce[0] (the merge buffer)
      Engine::Get()->PushAsync(
        [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          ReduceSumCPU(reduce);
          on_complete();
        }, Context::CPU(), const_vars, {reduce[0].var()},
        FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    } else {
      // sparse reduce
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());
      // lazily allocate a pinned sparse staging buffer per source
      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << stype << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      // temp workspace needed by ElementwiseSum for row-sparse inputs
      Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
          ResourceRequest(ResourceRequest::kTempSpace));
      Engine::Get()->PushAsync(
        [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          NDArray out = buf_merged;
          is_serial_push_?
            ReduceSumCPUExSerial(reduce, &out)
            : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
          on_complete();
        }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
        FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    }
    return buf_merged;
  }
  // Copy `src` to every destination array; non-CPU sources are staged through
  // the key's pinned merge buffer first.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      // already on CPU: fan out directly
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // First copy data to pinned_ctx, then broadcast.
      // Note that kv.init initializes the data on pinned_ctx.
      // This branch indicates push() with ndarrays on gpus were called,
      // and the source is copied to gpu ctx.
      // Also indicates that buffers are already initialized during push().
      auto& buf = merge_buf_[key].merged_buf(src.storage_type());
      CopyFromTo(src, &buf, priority);
      for (auto d : dst) CopyFromTo(buf, d, priority);
    }
  }
  // For each (dst, row_id) pair, retain only the requested rows of the
  // row-sparse `src` (via SparseRetain on the engine) and copy the result
  // into dst. Both src and the row-id arrays must live on CPU.
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "BroadcastRowSparse with src on gpu context not supported";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
        << "BroadcastRowSparse with row_indices on gpu context not supported";
      // retain according to unique indices
      // write directly into `out` when it is a distinct CPU array; otherwise
      // retain into a fresh temporary and copy afterwards
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
                  src.dtype(), src.aux_types());
      if (!is_diff_var) {
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        "refers to the same NDArray as the one stored in KVStore."
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue,"
                        "consider create a new NDArray buffer to store the output.");
      }
      Engine::Get()->PushAsync(
        [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          NDArray temp = retained_cpu;  // get rid the of const qualifier
          op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                src, indices, kWriteTo,
                                                &temp);
          on_complete();
        }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
        FnProperty::kNormal, priority, "KVStoreSparseRetain");
      // if retained_cpu == out, CopyFromTo will ignore the copy operation
      CopyFromTo(retained_cpu, out, priority);
    }
  }
private:
// reduce sum into val[0]
inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
std::vector<DType*> dptr(in_data.size());
for (size_t i = 0; i < in_data.size(); ++i) {
TBlob data = in_data[i].data();
CHECK(data.CheckContiguous());
dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
}
size_t total = in_data[0].shape().Size();
ReduceSumCPUImpl(dptr, total);
});
}
// serial implementation of reduce sum for row sparse NDArray.
// Serial reduce-sum for row-sparse NDArrays: out = sum(in[0..n-1]).
// Gathers the row indices of all inputs, deduplicates them to find the set of
// unique non-zero rows, allocates `out` to that size, then merges each unique
// row by walking a per-input cursor (`offsets`) over the inputs' sorted index
// arrays. NOTE(review): the merge assumes each input's index array is sorted
// ascending (standard for row-sparse storage) — the cursors only move forward.
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
  using namespace rowsparse;
  using namespace mshadow;
  auto stype = out->storage_type();
  CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
  size_t total_num_rows = 0;
  size_t num_in = in.size();
  // skip the ones with empty indices and values
  std::vector<bool> skip(num_in, false);
  // the values tensor of the inputs
  MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
    MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
      std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
      std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
      // offset to the values tensor of all inputs
      std::vector<size_t> offsets(num_in, 0);
      std::vector<size_t> num_rows(num_in, 0);
      // First pass: record each input's row count and grab flat views of its
      // values and indices. Uninitialized (all-zero) inputs are skipped.
      for (size_t i = 0; i < num_in; i++) {
        if (!in[i].storage_initialized()) {
          skip[i] = true;
          continue;
        }
        auto size = in[i].aux_shape(kIdx).Size();
        num_rows[i] = size;
        total_num_rows += size;
        in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
        in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
      }
      std::vector<IType> indices;
      indices.reserve(total_num_rows);
      // gather indices from all inputs
      for (size_t i = 0; i < num_in; i++) {
        for (size_t j = 0; j < num_rows[i]; j++) {
          indices.emplace_back(in_indices[i][j]);
        }
      }
      CHECK_EQ(indices.size(), total_num_rows);
      // dedup indices
      std::sort(indices.begin(), indices.end());
      indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
      // the one left are unique non-zero rows
      size_t nnr = indices.size();
      // allocate memory for output
      out->CheckAndAlloc({Shape1(nnr)});
      auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
      auto val_data = out->data().FlatTo2D<cpu, DType>();
      // Second pass: for each unique row, accumulate the matching row from
      // every input that contains it. The first contributor is copied (which
      // also initializes the freshly-allocated storage); later ones are added.
      for (size_t i = 0; i < nnr; i++) {
        // copy indices back
        idx_data[i] = indices[i];
        // `zeros` is true until the first contributing input is found.
        bool zeros = true;
        for (size_t j = 0; j < num_in; j++) {
          if (skip[j]) continue;
          size_t offset = offsets[j];
          if (offset < num_rows[j]) {
            if (indices[i] == in_indices[j][offset]) {
              if (zeros) {
                Copy(val_data[i], in_vals[j][offset], nullptr);
                zeros = false;
              } else {
                val_data[i] += in_vals[j][offset];
              }
              // advance this input's cursor past the consumed row
              offsets[j] += 1;
            }
          }
        }
      }
    });
  });
}
// Accumulate dptr[1..n-1][offset..offset+size) into dptr[0][offset..offset+size).
// The loop is hand-unrolled four inputs at a time; the switch handles the
// 1/2/3-input remainder on the final iteration. The `in_0 += in_1 + ...`
// statements rely on mshadow expression templates to fuse each group into a
// single pass over the data.
template<typename DType>
inline static void ReduceSumCPU(
    const std::vector<DType*> &dptr, size_t offset, index_t size) {
  using namespace mshadow;  // NOLINT(*)
  // Destination view: a 1-D window of the first array.
  Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
  for (size_t i = 1; i < dptr.size(); i+=4) {
    // dptr.size() - i = number of inputs still to add; >= 4 takes `default`.
    switch (dptr.size() - i) {
      case 1: {
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        in_0 += in_1;
        break;
      }
      case 2: {
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
        in_0 += in_1 + in_2;
        break;
      }
      case 3: {
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
        in_0 += in_1 + in_2 + in_3;
        break;
      }
      default: {
        // Four or more remaining: add the next four and continue the loop.
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
        in_0 += in_1 + in_2 + in_3 + in_4;
        break;
      }
    }
  }
}
// Reduce-sum driver: sums all arrays in `dptr` into dptr[0] over `total`
// elements. Small arrays (below bigarray_bound_) or single-threaded
// configurations run serially; large arrays are split into fixed-size chunks
// processed in parallel with OpenMP.
template<typename DType>
inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
  // Chunk size: at most 4K elements, never larger than the big-array bound.
  const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
  long ntask = (total + step - 1) / step;  // NOLINT(*)
  const bool serial = total < bigarray_bound_ || nthread_reduction_ <= 1;
  if (serial) {
    ReduceSumCPU(dptr, 0, total);
    return;
  }
  #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
  for (long task = 0; task < ntask; ++task) {  // NOLINT(*)
    const size_t t = static_cast<size_t>(task);
    const size_t begin = std::min(t * step, total);
    const size_t end = std::min((t + 1) * step, total);
    // The final chunk must land exactly on `total`.
    if (task == ntask - 1) CHECK_EQ(end, total);
    ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
  }
}
/// \brief temporal space for pushing and pulling
struct BufferEntry {
  /// \brief the merged value
  NDArray merged;
  /// \brief the cpu buffer for gpu data
  std::vector<NDArray> copy_buf;
  /// \brief return the merge buffer matching `stype`; the row-sparse buffer
  /// is created lazily from the dense buffer's shape/ctx/dtype on first use.
  inline NDArray& merged_buf(NDArrayStorageType stype) {
    if (stype == kDefaultStorage) {
      return merged;
    }
    CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
    // lazily initialize sparse_merged from the dense buffer's metadata
    if (sparse_merged.is_none()) {
      CHECK(!merged.is_none());
      sparse_merged = NDArray(kRowSparseStorage, merged.shape(),
                              merged.ctx(), true, merged.dtype());
    }
    return sparse_merged;
  }

 private:
  /// \brief the sparse merged value
  NDArray sparse_merged;
};
/// \brief per-key merge buffers reused across push/pull calls
std::unordered_map<int, BufferEntry> merge_buf_;
/// \brief size threshold above which reduction is parallelized
size_t bigarray_bound_;
/// \brief number of OpenMP threads used for the dense reduction
int nthread_reduction_;
/// \brief when true, sparse reduce uses the serial implementation
bool is_serial_push_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidth is larger than the
* device-to-CPU bandwidth, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  // Record the key's shape/dtype; buffers are allocated lazily on the first
  // reduce (see InitBuffersAndComm). `stype` is accepted for interface
  // compatibility but not stored — the merged buffer is always created dense
  // and a sparse view is derived from it on demand (see BufferEntry).
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    sorted_key_attrs_.emplace_back(key, shape, dtype);
  }

  // One-time setup on first use: allocate merge buffers across the devices
  // seen in `src` and (optionally) enable CUDA peer-to-peer access.
  // MXNET_ENABLE_GPU_P2P=0 disables the P2P setup.
  void InitBuffersAndComm(const std::vector<NDArray>& src) {
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
  }

  // Sum `src` (one array per device) into the key's merge buffer and return
  // it. Dense inputs: src[0] is copied into the merged buffer directly and the
  // remaining inputs are staged via copy_buf; sparse inputs: every input is
  // staged. The returned reference stays valid until the next call for `key`.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // when this reduce is called from kvstore_dist, gc is not set
    // we don't do compression twice in dist_sync_device
    if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
      return ReduceCompressed(key, src, priority);
    }

    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }

    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());

    const NDArrayStorageType stype = src[0].storage_type();
    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      CopyFromTo(src[0], &buf_merged, priority);
      reduce[0] = buf_merged;

      if (buf.copy_buf.empty()) {
        // TODO(mli) this results in large device memory usage for huge ndarray,
        // such as the largest fullc in VGG. consider to do segment reduce with
        // NDArray.Slice or gpu direct memory access. for the latter, we need to
        // remove some ctx check, and also it reduces 20% perf
        buf.copy_buf.resize(src.size()-1);
        for (size_t i = 0; i < src.size()-1; ++i) {
          buf.copy_buf[i] = NDArray(
            buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size()-1; ++i) {
        CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
        reduce[i+1] = buf.copy_buf[i];
      }
    } else {
      // sparse reduce
      if (buf.copy_buf.empty()) {
        // initialize buffer for copying during reduce
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype());
        }
      }
      CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
      }
    }
    ElementwiseSum(reduce, &buf_merged, priority);
    return buf_merged;
  }

  // Reduce with 2-bit/other gradient compression: each input is quantized on
  // its own device (with a per-device residual), the small compressed buffer
  // is moved to the merge device, dequantized there, and the dequantized
  // copies are summed into buf.merged.
  const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                  int priority) {
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    if (buf.copy_buf.empty()) {
      // one buf for each context
      buf.copy_buf.resize(src.size());
      buf.compressed_recv_buf.resize(src.size());
      buf.compressed_send_buf.resize(src.size());
      buf.residual.resize(src.size());

      for (size_t i = 0; i < src.size(); ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = 0;
        int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
        buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
                                        false, buf.merged.dtype());
        buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
                                        false, buf.merged.dtype());
      }
    }

    for (size_t i = 0; i < src.size(); ++i) {
      // compress before copy
      // this is done even if the data is on same context as copy_buf because
      // we don't want the training to be biased towards data on this GPU
      gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
      if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
        CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
      } else {
        // avoid memory copy when they are on same context
        buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
      }
      gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    // NOTE(review): unlike Reduce(), this call does not forward `priority`.
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  // Broadcast `src` to every destination. Before the merge buffers exist
  // (first call), the value is copied to one destination chosen by key and
  // re-broadcast from there; afterwards the merge buffer is used as the
  // staging point.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type());
      CopyFromTo(src, &buf_merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf_merged, d, priority);
      }
    }
  }

  // Broadcast selected rows of a row-sparse `src` to each (output, row_id)
  // pair. The retain kernel runs on the context of the retained buffer
  // (CPU or GPU); the result is then copied to `out` if needed.
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";

    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx(), src.ctx())
        << "row_id and src are expected to be on the same context";

      // retain according to indices
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      // Write directly into *out only when it is a distinct variable on the
      // same context; otherwise retain into a temporary and copy afterwards.
      NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
                  out->dtype(), out->aux_types());
      if (!is_diff_var) {
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        "refers to the same NDArray as the one stored in KVStore."
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue,"
                        "consider create a new NDArray buffer to store the output.");
      }

      Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          using namespace mxnet::common;
          NDArray temp = retained_gpu;
          switch (temp.ctx().dev_mask()) {
            case cpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
                  src, indices, kWriteTo, &temp);
              break;
            }
#if MXNET_USE_CUDA
            case gpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
                  src, indices, kWriteTo, &temp);
              // wait for GPU operations to complete
              rctx.get_stream<gpu>()->Wait();
              break;
            }
#endif
            default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
          }
          on_complete();
        }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()},
        FnProperty::kNormal, priority, "KVStoreSparseRetain");
      CopyFromTo(retained_gpu, out, priority);
    }
  }

 private:
  // Try to enable CUDA peer-to-peer access between every pair of GPUs in
  // `devs`. Failure is non-fatal: a warning plus an access matrix ('v' =
  // enabled, '.' = not) is logged and the code falls back to staged copies.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          // cudaErrorPeerAccessAlreadyEnabled counts as success (e.g. when
          // another library already enabled the pair).
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  // (key, shape, dtype) as registered via Init()
  using KeyAttrs = std::tuple<int, TShape, int>;

  // try to allocate buff on device evenly
  // Largest keys are placed first; each key's buffer goes to the device with
  // the smallest total allocated size so far (greedy load balancing).
  void InitMergeBuffer(const std::vector<Context>& devs) {
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }

    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      const int key  = std::get<0>(sorted_key_attrs_[i]);
      const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
      const int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      Context ctx;
      // pick the least-loaded device for this key
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      // Delayed allocation - as the dense merged buffer might not be used at all if push()
      // only sees sparse arrays
      bool delay_alloc = true;
      buf.merged = NDArray(shape, ctx, delay_alloc, type);
      ctx_info[ctx.dev_id].second += shape.Size();
    }
    inited_ = true;
  }

  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the dense merged value for reduce and broadcast operations
    NDArray merged;
    /// \brief the gpu buffer for copy during reduce operation
    std::vector<NDArray> copy_buf;
    /// \brief the residual buffer for gradient compression
    std::vector<NDArray> residual;
    /// \brief the small buffer for compressed data in sender
    std::vector<NDArray> compressed_send_buf;
    /// \brief the small buffer for compressed data in receiver
    std::vector<NDArray> compressed_recv_buf;

    /// \brief the merged buffer for the given storage type (could be either dense or row_sparse)
    inline NDArray& merged_buf(NDArrayStorageType stype) {
      if (stype == kDefaultStorage) {
        CHECK(!merged.is_none()) << "unintialized merge buffer detected";
        return merged;
      }
      CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
      // check if sparse_merged is initialized
      if (sparse_merged.is_none()) {
        CHECK(!merged.is_none());
        sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
                                true, merged.dtype());
      }
      return sparse_merged;
    }

   private:
    /// \brief the sparse merged value for reduce and rowsparse broadcast operations
    NDArray sparse_merged;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  bool inited_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
conv5x5s2_pack4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3<outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"// r04 r05 r06 r07
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%1] \n"// r08 r09 r010
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r14 r15 r16 r17
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%2] \n"// r18 r19 r110
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r24 r25 r26 r27
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%3] \n"// r28 r29 r210
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// r34 r35 r36 r37
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%4] \n"// r38 r39 r310
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%5], #64 \n"// r44 r45 r46 r47
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%5] \n"// r48 r49 r410
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
);
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"// r04 r05 r06 r07
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"// r08 r09
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d14[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d14[1] \n"
"vmla.f32 q15, q9, d2[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d15[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d15[1] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #128] \n"
"vld1.f32 {d4-d5}, [%1 :128] \n"// r010
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"// r10 r11 r12 r13
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r14 r15 r16 r17
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #256] \n"
"vld1.f32 {d8-d11}, [%2 :128]! \n"// r18 r19
"vmla.f32 q12, q8, d12[0] \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d12[1] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d13[0] \n"
"vmla.f32 q13, q10, d1[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d13[1] \n"
"vmla.f32 q13, q11, d1[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d12-d13}, [%2 :128] \n"// r110
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"// r24 r25 r26 r27
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"// r28 r29
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d14[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d14[1] \n"
"vmla.f32 q15, q9, d2[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d15[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d15[1] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d4-d5}, [%3 :128] \n"// r210
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"// r30 r31 r32 r33
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4!, {d0-d7} \n"// r34 r35 r36 r37
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"// r38 r39
"vmla.f32 q12, q8, d12[0] \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d12[1] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d13[0] \n"
"vmla.f32 q13, q10, d1[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d13[1] \n"
"vmla.f32 q13, q11, d1[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #128] \n"
"vld1.f32 {d12-d13}, [%4 :128] \n"// r310
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"// r40 r41 r42 r43
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5!, {d8-d15} \n"// r44 r45 r46 r47
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// r48 r49
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d14[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d14[1] \n"
"vmla.f32 q15, q9, d2[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d15[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d15[1] \n"
"vmla.f32 q15, q11, d3[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"pld [%5, #128] \n"
"vld1.f32 {d4-d5}, [%5 :128] \n"// r410
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"sub %1, %1, #32 \n"
"sub %2, %2, #32 \n"
"sub %3, %3, #32 \n"
"sub %4, %4, #32 \n"
"sub %5, %5, #32 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%1] \n"// r04 r05 r06
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%2] \n"// r14 r15 r16
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%3] \n"// r24 r25 r26
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%4] \n"// r34 r35 r36
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%5] \n"// r44 r45 r46
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #384] \n"
"vldm %1, {d8-d13} \n"// r04 r05 r06
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r10 r11 r12 r13
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #384] \n"
"vldm %2, {d8-d13} \n"// r14 r15 r16
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #384] \n"
"vldm %3, {d8-d13} \n"// r24 r25 r26
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4!, {d0-d7} \n"// r30 r31 r32 r33
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #384] \n"
"vldm %4, {d8-d13} \n"// r34 r35 r36
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"// r40 r41 r42 r43
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #384] \n"
"vldm %5, {d8-d13} \n"// r44 r45 r46
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j<outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"// sum0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"// r00 r01
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%1] \n"// r02 r03 r04
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r10 r11
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%2] \n"// r12 r13 r14
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r20 r21
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%3] \n"// r22 r23 r24
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4s, v1.4s}, [%4], #32 \n"// r30 r31
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%4] \n"// r32 r33 r34
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"// r40 r41
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%5] \n"// r42 r43 r44
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n"// sum0
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"// r00 r01
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmul.f32 q13, q8, d0[0] \n"
"vmul.f32 q14, q9, d0[1] \n"
"vmul.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #384] \n"
"vldm %1, {d4-d9} \n"// r02 r03 r04
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"// r10 r11
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #384] \n"
"vldm %2, {d4-d9} \n"// r12 r13 r14
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"// r20 r21
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #384] \n"
"vldm %3, {d4-d9} \n"// r22 r23 r24
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4 :128]! \n"// r30 r31
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #384] \n"
"vldm %4, {d4-d9} \n"// r32 r33 r34
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// r40 r41
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #384] \n"
"vldm %5, {d4-d9} \n"// r42 r43 r44
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q14, q13, q14 \n"
"vadd.f32 q15, q14, q15 \n"
"vadd.f32 q12, q12, q15 \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
}
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
double
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
double
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
double
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
/*
  Allocate a QuantizeInfo structure, populate it with library defaults,
  and (when image_info is supplied) seed the dither method and
  measure_error flag from it, honoring any "dither" image option.
  The caller owns the returned structure.
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *dither_option;

  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /* Default dither method tracks the image-info dither flag. */
  if (image_info->dither == MagickFalse)
    quantize_info->dither_method=NoDitherMethod;
  else
    quantize_info->dither_method=RiemersmaDitherMethod;
  /* An explicit "dither" option overrides the flag-derived default. */
  dither_option=GetImageOption(image_info,"dither");
  if (dither_option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,dither_option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  Load a pixel's channels into alpha_pixel as doubles.  When alpha
  association is enabled and the pixel is not fully opaque, the color
  channels are premultiplied by the normalized alpha; the alpha channel
  itself is always copied unscaled.
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /* Opaque pixels (or association disabled) keep their raw channels. */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    scale=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=scale*GetPixelRed(image,pixel);
  alpha_pixel->green=scale*GetPixelGreen(image,pixel);
  alpha_pixel->blue=scale*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
/*
  PixelInfo variant of AssociateAlphaPixel(): copy the channels into
  alpha_pixel, premultiplying the color channels by normalized alpha
  when association is enabled and the pixel is not fully opaque.
*/
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    scale;

  /* Opaque pixels (or association disabled) keep their raw channels. */
  scale=1.0;
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    scale=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=scale*pixel->red;
  alpha_pixel->green=scale*pixel->green;
  alpha_pixel->blue=scale*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
/*
  Map a pixel color to a child-node index at tree level `index`: bit
  `index` of each 8-bit-scaled channel contributes one bit of the id
  (red -> bit 0, green -> bit 1, blue -> bit 2, and alpha -> bit 3 when
  alpha association is enabled).
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    blue_bit,
    green_bit,
    id,
    red_bit;

  red_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) &
    0x01);
  green_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01);
  blue_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01);
  id=red_bit | (green_bit << 1) | (blue_bit << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x01) << 3;
  return(id);
}
/*
  AssignImageColors() builds the output colormap from the pruned tree and
  then reclassifies every pixel to its closest colormap entry, dithering
  when requested.  Returns MagickTrue unless colormap allocation fails
  (ThrowBinaryException); per-row pixel-cache failures only clear the
  local `status` flag.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  /* Quantize in the caller-requested working colorspace, if any. */
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() repopulates image->colors as it walks the tree. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;  /* thread-private copy: target/distance/color_number mutate */

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Measure the run of identical pixels so the tree search and
             closest-color scan are paid once per run, not per pixel. */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Seed with a distance larger than any possible RGBA distance. */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            /* When measuring error, original pixel values must survive so
               GetImageQuantizeError() can compare against them. */
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  /* Two-color grayscale request: force pure black/white, ordered so the
     darker colormap entry (by luma) maps to black. */
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelInfoLuma(image->colormap+0) >
           GetPixelInfoLuma(image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* Restore the caller's original colorspace if we converted above. */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  /*
    Associate alpha only when the image actively blends its alpha channel.
  */
  associate_alpha=MagickFalse;
  if (image->alpha_trait == BlendPixelTrait)
    associate_alpha=MagickTrue;
  /*
    A two-color grayscale reduction never carries alpha into the cube.
  */
  if (cube_info->quantize_info->number_colors == 2)
    if ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
        (cube_info->quantize_info->colorspace == GRAYColorspace))
      associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
    Pixels are processed in runs of identical color (count) to amortize the
    tree descent.  Returns MagickFalse if the scan was aborted early.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;  /* stays 0 when alpha is not associated */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /*
          Halve the cube and step the midpoint toward the pixel's octant.
        */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)  /* match second pass's check */
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Too many colors: prune to the current depth, then fall through to
          the shallower classification pass below.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify the remaining rows at the (possibly pruned) tree depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate and default-initialize the clone; when a source structure is
    supplied, copy its user-visible settings over the defaults.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  double
    candidate;

  register double
    alpha,
    beta,
    distance;

  register DoublePixelPacket
    *magick_restrict q;

  register PixelInfo
    *magick_restrict p;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Visit every allocated child node first.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique == 0)
    return;
  /*
    Determine if this node's colormap entry is the closest match so far.
    Distance accumulates per channel with an early exit as soon as it can no
    longer beat the best distance (the !(d <= limit) form keeps the original
    NaN behavior).
  */
  p=image->colormap+node_info->color_number;
  q=(&cube_info->target);
  alpha=1.0;
  beta=1.0;
  if (cube_info->associate_alpha != MagickFalse)
    {
      alpha=(double) (QuantumScale*p->alpha);
      beta=(double) (QuantumScale*q->alpha);
    }
  candidate=alpha*p->red-beta*q->red;
  distance=candidate*candidate;
  if (!(distance <= cube_info->distance))
    return;
  candidate=alpha*p->green-beta*q->green;
  distance+=candidate*candidate;
  if (!(distance <= cube_info->distance))
    return;
  candidate=alpha*p->blue-beta*q->blue;
  distance+=candidate*candidate;
  if (!(distance <= cube_info->distance))
    return;
  if (cube_info->associate_alpha != MagickFalse)
    {
      candidate=p->alpha-q->alpha;
      distance+=candidate*candidate;
    }
  if (distance <= cube_info->distance)
    {
      cube_info->distance=distance;
      cube_info->color_number=node_info->color_number;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  /*
    Drop duplicate and unused colormap entries by re-quantizing the image to
    its current palette size at the maximum tree depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);  /* nothing to compress: not a palette image */
  GetQuantizeInfo(&quantize_info);
  quantize_info.tree_depth=MaxTreeDepth;
  quantize_info.number_colors=image->colors;
  return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the
% number of colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Depth-first traversal: emit a colormap entry for every node that holds
    unclassified pixels (number_unique != 0).  Returns the running number of
    colormap entries defined so far.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color holds per-channel sums scaled by QuantumScale, so the
        mean is total*QuantumRange/number_unique.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);  /* 1/number_unique, safe near 0 */
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Color sums were accumulated alpha-associated; divide by the
                mean alpha (gamma) to recover the straight color.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Track the most-populated non-opaque entry as the designated
                transparent colormap index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      /* record this entry's index, then grow the colormap */
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *next,
    *queue;

  /*
    Walk the node-allocation queue, releasing each slab of tree nodes and
    then the queue link itself.
  */
  queue=cube_info->node_queue;
  do
  {
    next=queue->next;
    queue->nodes=(NodeInfo *) RelinquishMagickMemory(queue->nodes);
    queue=(Nodes *) RelinquishMagickMemory(queue);
    queue=next;
  } while (queue != (Nodes *) NULL);
  cube_info->node_queue=queue;
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Deallocate a QuantizeInfo structure and return NULL for caller
    reassignment.  Validate the structure before tracing — asserting first
    matches the destructor convention used elsewhere in MagickCore.
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  quantize_info->signature=(~MagickCoreSignature);  /* poison against reuse */
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color reduced algorithm to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Release the per-thread error-diffusion buffers allocated by
    AcquirePixelThreadSet() and the pointer array itself; returns NULL.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);  /* hoisted
    out of the loop condition: the limit is invariant across iterations */
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    n;

  size_t
    number_threads;

  /*
    Allocate one pair of error rows (two scanlines of `count' packets) per
    worker thread.  Returns NULL on failure; partially-built sets are torn
    down with DestroyPixelThreadSet().
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  /* zero the pointer table so cleanup can tell which rows were acquired */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixels[n]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[n] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  /*
    Map a pixel to its slot in the color cache: each channel is reduced to
    (8-CacheShift) significant bits and packed into one index.  The alpha
    bits participate only when alpha is associated.
  */
  offset=(ssize_t) RedShift(ScaleQuantumToChar(ClampPixel(pixel->red)));
  offset|=(ssize_t) GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green)));
  offset|=(ssize_t) BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg (serpentine scan).
    Two error rows per thread hold the current and previous scanline's
    residuals; the 7/16, 5/16, 3/16, 1/16 weights below are the classic
    Floyd-Steinberg kernel, scaled by the optional diffusion amount.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* thread-local copy of the cube so cache/target updates don't race */
    cube=(*cube_info);
    /* ping-pong the two error rows by row parity */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* serpentine: scan right-to-left on odd rows (v is the step direction) */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      /* u is the actual column visited this step of the serpentine scan */
      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      if (x > 0)
        {
          /* 7/16 of the error from the previously visited pixel */
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              /* 1/16 from the diagonal neighbor on the previous row; note
                 this term is not scaled by `amount' — TODO confirm whether
                 that asymmetry is intentional */
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          /* 5/16 from directly above */
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              /* 3/16 from the other diagonal on the previous row */
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      /* NOTE(review): sync happens once per pixel here, not once per row —
         presumably tolerated for correctness; confirm before changing */
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* NOTE(review): `status' is tracked above but MagickTrue is returned
     unconditionally — verify against callers before altering */
  return(MagickTrue);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  /*
    Recursively trace a Hilbert space-filling curve over the image, calling
    RiemersmaDither() at each step.  The gravity constants encode the
    direction of travel; `level' is the remaining recursion depth (level 1
    emits the base three-step motif, deeper levels stitch four rotated
    sub-curves together with connecting steps).
  */
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        /* four rotated sub-curves joined by E, S, W connecting steps */
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  /*
    Dither the single pixel at the cube's current (x,y) position along the
    Hilbert curve, then step the position one pixel in 'direction'.  Returns
    MagickFalse on a pixel-cache failure or when the progress monitor requests
    cancellation; MagickTrue otherwise.  Positions outside the image are
    silently skipped (only the position update happens).
  */
  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /*
        Add the weighted errors of previously visited pixels to the current
        pixel, then clamp each channel back into quantum range.
      */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /*
        Look up the closest colormap entry, consulting the color cache first;
        a negative cache entry means "not resolved yet".
      */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.  The
            distance is seeded past the maximum possible squared distance so
            the first candidate always wins.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue (shift the
        queue left by one, then append this pixel's residual).
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Advance the curve position; any direction not listed (e.g. ForgetGravity)
    leaves the position unchanged.
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  size_t
    depth;

  ssize_t
    extent,
    residual;

  /*
    Dispatch to Floyd-Steinberg unless Riemersma dithering was requested.
  */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.  The curve order
    (depth) is the smallest power of two covering the larger image dimension.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  for (residual=extent; residual != 0; residual>>=1)
    depth++;
  if ((ssize_t) (1L << depth) < extent)
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /*
    A final dither step flushes the pixel at the curve's terminal position.
  */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
%      CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
%      one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*
length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  /*
    Return the next zeroed node from the preallocated pool, growing the pool
    by NodesInAList entries when exhausted.  Returns NULL on allocation
    failure.
  */
  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /* bug fix: release the queue header instead of leaking it */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  /*
    Measure the per-channel distance between each pixel and the colormap
    entry it maps to, filling image->error (mean error per pixel, normalized
    mean error, normalized maximum error).  DirectClass images have no
    colormap, so their error is left at zero.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;  /* alpha/beta stay 1.0 unless the image blends alpha */
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* three channel samples per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          /*
            Weight each side by its opacity so transparent pixels contribute
            proportionally less error.
          */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /*
        For each of red/green/blue: accumulate the absolute distance (mean
        error), the squared distance (mean square error), and the running
        maximum.
      */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize: divide by samples for the means, scale squared/linear errors
    into [0,1] with QuantumScale.
  */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Reset the structure to its defaults: 256 colors, Riemersma dithering, no
    colorspace transform, and error measurement disabled.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->signature=MagickCoreSignature;
  quantize_info->number_colors=256;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->measure_error=MagickFalse;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Snap every updatable channel to one of 'levels' evenly spaced values,
    then re-quantize the image.  NOTE(review): levels == 0 makes the
    unsigned expression (levels-1) in PosterizePixel wrap; presumably
    callers pass levels >= 2 -- confirm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For paletted images, posterize the colormap entries in place first.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image: one row per loop iteration, parallelized when OpenMP
    is available.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Black is only posterized for CMYK images; alpha only when the image
        actually blends alpha.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImage)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Re-quantize to at most levels^3 colors (capped at MaxColormapSize+1),
    applying the requested dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    child;

  size_t
    count;

  /*
    Recursively prune every populated child first so statistics flow from
    the leaves upward.  Nodes have 8 children without alpha, 16 with.
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) count; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[child]);
  /*
    Fold this node's unique-pixel count and color sums into its parent,
    then detach it from the tree.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    child;

  size_t
    count;

  /*
    Depth-first walk of the tree; any node sitting exactly at the cube's
    maximum depth is pruned, merging its statistics upward via PruneChild().
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) count; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[child]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    child;

  size_t
    count;

  /*
    Depth-first walk of the tree; any node deeper than the cube's configured
    depth is pruned, merging its statistics upward via PruneChild().
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) count; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[child]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  /*
    Reduce the image to at most quantize_info->number_colors colors:
    classify pixels into a color cube, prune the cube down to the color
    budget, then assign each pixel its closest surviving color.
  */
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the color budget to at most MaxColormapSize; 0 means "use the
    maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Gray images without blended alpha take a specialized grayscale path.
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  /*
    Already paletted within budget: only an optional colorspace transform is
    needed.
  */
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace,
          exception);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* use a shallower tree when dithering */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      /* and when alpha is blended (nodes have twice as many children) */
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      /* gray images get the full tree depth */
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  /*
    Quantize an image sequence with one shared colormap: classify the colors
    of every frame into a single color cube, reduce the cube once, then
    assign colors frame by frame.
  */
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the color budget to at most MaxColormapSize; 0 means "use the
    maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* use a shallower tree when dithering */
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  /*
    Pass 1: classify every frame's colors into the shared cube.  Each
    frame's own progress monitor is suspended so only sequence-level
    progress is reported.
  */
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /*
        Pass 2: map every frame's pixels onto the reduced shared colormap.
      */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  register ssize_t
    child;

  size_t
    count,
    number_children;

  /*
    Pre-order traversal: write this node's quantize_error at 'offset', then
    append every child subtree immediately after it.  Returns the number of
    entries written; writing stops once the array (cube_info->nodes entries)
    is full.
  */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  count=1;
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) number_children; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      count+=QuantizeErrorFlatten(cube_info,node_info->child[child],
        offset+count,quantize_error);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    child;

  size_t
    count;

  /*
    Depth-first traversal: prune every node whose quantization error is at
    or below the current pruning threshold, merging it into its parent.
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (child=0; child < (ssize_t) count; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[child]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Survivor: count it toward the color total when it uniquely
        represents pixels, and track the smallest surviving error as the
        threshold for the next pruning pass.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  qsort() comparator ordering quantization errors ascending.

  Fix: the original tested (*p > *q) BEFORE the epsilon equality test, so
  for two nearly-equal values compare(p,q) could return 1 while
  compare(q,p) returned 0.  That is not a consistent total order, which is
  undefined behavior for qsort().  Test equality-within-epsilon first so
  the comparator is symmetric.
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    *p,
    *q;

  p=(double *) error_p;
  q=(double *) error_q;
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  if (*p > *q)
    return(1);
  return(-1);
}
/*
  Repeatedly prune the color cube until no more than maximum_colors nodes
  carry pixels (n2 > 0).  Each pass prunes every node whose quantization
  error is at or below pruning_threshold, then raises the threshold to the
  minimum error seen among the survivors.
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Rapid reduction: flatten all node errors, sort them, and pick a
        starting threshold that prunes well beyond the target count
        (110% of maximum_colors+1) in a single first pass.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          size_t
            cutoff;

          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          cutoff=110*(cube_info->maximum_colors+1)/100;
          if (cube_info->nodes > cutoff)
            cube_info->next_threshold=quantize_error[cube_info->nodes-cutoff];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  span=cube_info->colors;
  while (cube_info->colors > cube_info->maximum_colors)
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Replace the colors of image with the closest colors found in
  remap_image.  The reference image's colors are classified into a color
  cube, whose size then caps number_colors, and each pixel of image is
  assigned its nearest cube color.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Initialize the color cube from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Remap every image in the sequence to the colors of remap_image.  When
  remap_image is NULL, fall back to building a single global colormap for
  the whole sequence via QuantizeImages().
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    return(QuantizeImages(quantize_info,images,exception));
  /*
    Classify the reference image's colors, then assign them to each
    image in the list until one fails.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      cube_info->quantize_info->number_colors=cube_info->colors;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering PixelInfo entries by grayscale intensity.

  Fix: the original returned `(int) intensity`, truncating the double
  difference toward zero — any two colors whose intensities differ by
  less than 1.0 compared as "equal", yielding an unstable ordering of
  near-identical gray levels.  Compare the sign of the difference instead.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
/*
  Convert image into a PseudoClass grayscale image: collect the distinct
  gray levels into a colormap, sort the colormap by intensity, collapse
  equivalent entries, then remap every pixel to its colormap index.
  Returns MagickTrue on success, MagickFalse on a pixel-cache failure.
*/
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
register ssize_t
i;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
/* Make sure pixel data is in a gray colorspace before sampling it. */
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
/* colormap_index maps a ScaleQuantumToMap() intensity to a colormap slot. */
if (image->storage_class == PseudoClass)
colormap_index=(ssize_t *) AcquireQuantumMemory(image->colors+1,
sizeof(*colormap_index));
else
colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize+1,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
/*
  DirectClass input: discover unique gray levels on the fly and assign
  each pixel a colormap index.  memset with -1 marks every intensity
  slot as "not yet seen" (all-ones bytes read back as -1 in ssize_t).
*/
(void) memset(colormap_index,(-1),MaxColormapSize*
sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
/* Gray image: the red channel carries the intensity. */
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
/*
  Double-checked pattern: the unsynchronized outer test is the fast
  path; the critical section re-tests before publishing a new
  colormap entry.  NOTE(review): the unlocked read is a benign race
  only if ssize_t stores are atomic on the target — confirm against
  project threading assumptions.
*/
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
/*
  Sort the colormap by intensity.  alpha temporarily stores each entry's
  pre-sort index so pixel indexes can be rewritten after sorting.
*/
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/* Collapse runs of equivalent colors into a single colormap entry. */
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
/* Map old (pre-sort) index -> deduplicated index. */
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
/* Second pass: rewrite each pixel's index through the dedup mapping. */
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
/* Promote to BilevelType when only black and white remain. */
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
/*
  Private definition of an image profile: a named, length-delimited blob
  of profile bytes plus the usual MagickCore structure signature.
*/
struct _ProfileInfo
{
/* Profile name (the key the profile is stored under). */
char
*name;
/* Number of bytes in info. */
size_t
length;
/* Raw profile payload. */
unsigned char
*info;
/* Structure signature for sanity checking. */
size_t
signature;
};
/*
  Context threaded through LCMS as the cmsContext user data: lets
  CMSExceptionHandler() report errors against the right image/exception.
*/
typedef struct _CMSExceptionInfo
{
Image
*image;
ExceptionInfo
*exception;
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
/*
  Clone the profile map of clone_image into image, replacing whatever
  profile map image already holds.  Always returns MagickTrue.
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  /*
    Drop any existing profiles, then deep-copy the source splay tree
    (keys duplicated with ConstantString, values with CloneStringInfo).
  */
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  Delete the named profile from the image.  Returns MagickFalse when the
  image has no profile map; otherwise returns the splay-tree delete status.
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  SplayTreeInfo
    *profiles;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  profiles=(SplayTreeInfo *) image->profiles;
  if (profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* Also scrub the matching 8BIM sub-profile before removing the entry. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree(profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Release the splay tree backing the image's profile map, if any.
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  Free the per-thread pixel buffers and the array that holds them.
  Returns NULL for pointer-clearing assignment at the call site.

  Improvement: GetMagickResourceLimit() is loop-invariant, so hoist it out
  of the loop condition instead of re-querying it every iteration.
*/
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(pixels != (unsigned short **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (unsigned short *) NULL)
      pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
  pixels=(unsigned short **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static unsigned short **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
register ssize_t
i;
unsigned short
**pixels;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (unsigned short **) NULL)
return((unsigned short **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (unsigned short *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
/*
  Delete the per-thread LCMS transforms and free the holding array.
  Returns NULL for pointer-clearing assignment at the call site.

  Improvement: GetMagickResourceLimit() is loop-invariant, so hoist it out
  of the loop condition instead of re-querying it every iteration.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(transform != (cmsHTRANSFORM *) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}
/*
  Build one LCMS color transform per worker thread, all sharing the same
  source/target profiles, rendering intent and flags.  On any creation
  failure the partial set is destroyed and NULL is returned.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    n;

  size_t
    count;

  count=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(count,sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  /* Zero the slots so a partial teardown only deletes what was created. */
  (void) ResetMagickMemory(transform,0,count*sizeof(*transform));
  for (n=0; n < (ssize_t) count; n++)
  {
    transform[n]=cmsCreateTransformTHR((cmsContext) image,source_profile,
      source_type,target_profile,target_type,intent,flags);
    if (transform[n] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  LCMS error callback: route lcms diagnostics into MagickCore's exception
  machinery via the CMSExceptionInfo threaded through the cmsContext.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) context;
  if ((cms_exception == (CMSExceptionInfo *) NULL) ||
      (cms_exception->exception == (ExceptionInfo *) NULL))
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(cms_exception->exception,GetMagickModule(),
        ImageWarning,"UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(cms_exception->exception,GetMagickModule(),
    ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
/*
  SetsRGBImageProfile() attaches a built-in sRGB ICC profile to the image
  under the "icc" key.  It is a no-op (returning MagickFalse) when the image
  already carries an ICC profile.  Returns the status of SetImageProfile().
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  /*
    Compact sRGB ICC profile ("argl", IEC 61966-2.1, equivalent to the 1998
    HP profile) created by Graeme W. Gill and released into the public
    domain; see the embedded 'cprt' text below.  Stored verbatim as raw
    profile bytes.
  */
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
      0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
      0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
      0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
      0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
      0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
      0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
      0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
      0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
      0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
      0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
      0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
      0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
      0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
      0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
      0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
      0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
      0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
      0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
      0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
      0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
      0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
      0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
      0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
      0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
      0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
      0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
      0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
      0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
      0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
      0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
      0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
      0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
      0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
      0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
      0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
      0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
      0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
      0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
      0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
      0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
      0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
      0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
      0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
      0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
      0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
      0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
      0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
      0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
      0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
      0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
      0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
      0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
      0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
      0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
      0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
      0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
      0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
      0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
      0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
      0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
      0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
      0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
      0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
      0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
      0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
      0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
      0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
      0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
      0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
      0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
      0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
      0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
      0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
      0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
      0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
      0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
      0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
      0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
      0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
      0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
      0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
      0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
      0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
      0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
      0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
      0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
      0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
      0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
      0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
      0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
      0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
      0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
      0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
      0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
      0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
      0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
      0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
      0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
      0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
      0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
      0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
      0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
      0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
      0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
      0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
      0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
      0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
      0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
      0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
      0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
      0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
      0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
      0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
      0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
      0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
      0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
      0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
      0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
      0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
      0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
      0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
      0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
      0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
      0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
      0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
      0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
      0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
      0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
      0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
      0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
      0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
      0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
      0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
      0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
      0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
      0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
      0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
      0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
      0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
      0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
      0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
      0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
      0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
      0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
      0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
      0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
      0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
      0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
      0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
      0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
      0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
      0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
      0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
      0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
      0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
      0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
      0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
      0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
      0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
      0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
      0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
      0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
      0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
      0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
      0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
      0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
      0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
      0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
      0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
      0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
      0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
      0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
      0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
      0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
      0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
      0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
      0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
      0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
      0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
      0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
      0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
      0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
      0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
      0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
      0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
      0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
      0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
      0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
      0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
      0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
      0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
      0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
      0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
      0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
      0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
      0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
      0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
      0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
      0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
      0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
      0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
      0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
      0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
      0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
      0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
      0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
      0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
      0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
      0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
      0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
      0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
      0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
      0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
      0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
      0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
      0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
      0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
      0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
      0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
      0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
      0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
    };

  StringInfo
    *profile;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Respect an existing ICC profile; never overwrite it.
  */
  if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
    return(MagickFalse);
  /*
    SetStringInfoDatum() copies the table into the StringInfo, so the static
    buffer above is never handed to the image directly.
  */
  profile=AcquireStringInfo(sizeof(sRGBProfile));
  SetStringInfoDatum(profile,sRGBProfile);
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
  ProfileImage() adds, applies, or removes a named profile on the image.
  A NULL/empty datum deletes all profiles matching the name; an "icc"/"icm"
  profile is applied to the pixels via LittleCMS (when built in); any other
  profile is simply attached.  Returns MagickTrue on success.

  Fixes relative to the previous revision: the StringInfo acquired for the
  incoming datum is now destroyed on every error exit (it previously leaked
  through ThrowProfileException, the bad-source-profile throw, and the
  SetImageStorageClass failure return), and a partially-allocated pixel
  thread set is released before throwing.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag  "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
  profile=DestroyStringInfo(profile); \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s) whose name matches (name may be a list or
        wildcard handled by IsOptionMember).  Deleting invalidates the
        iterator, so restart it after each removal.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Re-applying the identical ICC profile: use EXIF hints to decide
            whether the image is really sRGB and, if so, normalize the
            attached profile.  NOTE(review): LocaleCompare() is presumed to
            tolerate a NULL property value here -- confirm.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /* Future.
            value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
            if (LocaleCompare(value,"R03.") != 0)
              (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /*
            Profile is unchanged; nothing to transform.
          */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(CMSExceptionHandler);
        cms_exception.image=image;
        cms_exception.exception=exception;
        source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          {
            profile=DestroyStringInfo(profile);
            ThrowBinaryException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            int
              intent;

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            unsigned short
              **magick_restrict source_pixels,
              **magick_restrict target_pixels;

            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  The image already has an ICC profile: transform from it
                  (source) into the newly supplied profile (target).
                */
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR((cmsContext)
                  &cms_exception,GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Map the source profile's color space to the pixel layout lcms
              should read.
            */
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
                source_channels=4;
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
                source_channels=1;
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
                source_type=(cmsUInt32Number) TYPE_Lab_16;
                source_channels=3;
                break;
              }
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                source_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
                source_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                source_colorspace=YCbCrColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                source_channels=3;
                break;
              }
              default:
              {
                source_colorspace=UndefinedColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
            }
            /*
              Without an explicit target profile the destination space is
              the source profile's PCS.
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
                target_channels=4;
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
                target_type=(cmsUInt32Number) TYPE_Lab_16;
                target_channels=3;
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
                target_channels=1;
                break;
              }
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                target_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
                target_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                target_colorspace=YCbCrColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                target_channels=3;
                break;
              }
              default:
              {
                target_colorspace=UndefinedColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
            }
            /*
              Reject combinations the transform cannot honor.
            */
            if ((source_colorspace == UndefinedColorspace) ||
                (target_colorspace == UndefinedColorspace))
              ThrowProfileException(ImageError,
                "ColorspaceColorProfileMismatch",name);
            if ((source_colorspace == GRAYColorspace) &&
                (SetImageGray(image,exception) == MagickFalse))
              ThrowProfileException(ImageError,
                "ColorspaceColorProfileMismatch",name);
            if ((source_colorspace == CMYKColorspace) &&
                (image->colorspace != CMYKColorspace))
              ThrowProfileException(ImageError,
                "ColorspaceColorProfileMismatch",name);
            if ((source_colorspace == XYZColorspace) &&
                (image->colorspace != XYZColorspace))
              ThrowProfileException(ImageError,
                "ColorspaceColorProfileMismatch",name);
            if ((source_colorspace == YCbCrColorspace) &&
                (image->colorspace != YCbCrColorspace))
              ThrowProfileException(ImageError,
                "ColorspaceColorProfileMismatch",name);
            if ((source_colorspace != CMYKColorspace) &&
                (source_colorspace != LabColorspace) &&
                (source_colorspace != XYZColorspace) &&
                (source_colorspace != YCbCrColorspace) &&
                (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
              ThrowProfileException(ImageError,
                "ColorspaceColorProfileMismatch",name);
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,
              source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,
              target_channels);
            if ((source_pixels == (unsigned short **) NULL) ||
                (target_pixels == (unsigned short **) NULL))
              {
                /*
                  Release whichever thread set was allocated before throwing.
                */
                if (target_pixels != (unsigned short **) NULL)
                  target_pixels=DestroyPixelThreadSet(target_pixels);
                if (source_pixels != (unsigned short **) NULL)
                  source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                profile=DestroyStringInfo(profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static,4) shared(status) \
              magick_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register ssize_t
                x;

              register Quantum
                *magick_restrict q;

              register unsigned short
                *p;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /*
                Pack one row of quantum pixels into the per-thread 16-bit
                scratch buffer in the layout lcms expects.
              */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=ScaleQuantumToShort(GetPixelRed(image,q));
                if (source_channels > 1)
                  {
                    *p++=ScaleQuantumToShort(GetPixelGreen(image,q));
                    *p++=ScaleQuantumToShort(GetPixelBlue(image,q));
                  }
                if (source_channels > 3)
                  *p++=ScaleQuantumToShort(GetPixelBlack(image,q));
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_pixels[id],
                target_pixels[id],(unsigned int) image->columns);
              /*
                Unpack the transformed row back into the pixel cache; rewind
                q to the start of the row first.
              */
              p=target_pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_channels == 1)
                  SetPixelGray(image,ScaleShortToQuantum(*p),q);
                else
                  SetPixelRed(image,ScaleShortToQuantum(*p),q);
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(image,ScaleShortToQuantum(*p),q);
                    p++;
                    SetPixelBlue(image,ScaleShortToQuantum(*p),q);
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelBlack(image,ScaleShortToQuantum(*p),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace,exception);
            /*
              Update the image type to reflect the new channel layout.
            */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% StringInfo *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  RemoveImageProfile() detaches the named profile from the image and hands
  ownership of the StringInfo back to the caller (NULL when absent).  Any
  copy embedded in the 8BIM wrapper is stripped as well.
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*removed;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
/* Passing NULL removes the matching record from the 8BIM resource block. */
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
removed=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(removed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  ResetImageProfileIterator() rewinds the profile iterator so that a
  subsequent GetNextImageProfile() starts from the first profile.
  No-op when the image has no profile tree.
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles != (SplayTreeInfo *) NULL)
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/* Copy one byte from the resource stream into *quantum; return the
   advanced stream position. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=p[0];
return(p+1);
}
/* Decode a big-endian 32-bit value from the resource stream into *quantum;
   return the advanced stream position. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
unsigned int
value;
value=(unsigned int) p[0] << 24;
value|=(unsigned int) p[1] << 16;
value|=(unsigned int) p[2] << 8;
value|=(unsigned int) p[3];
*quantum=value;
return(p+4);
}
/* Decode a big-endian 16-bit value from the resource stream into *quantum;
   return the advanced stream position. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
unsigned short
value;
value=(unsigned short) ((p[0] << 8) | p[1]);
*quantum=value;
return(p+2);
}
/* Encode quantum as a big-endian 32-bit value at p (exactly 4 bytes
   written; caller guarantees the destination is large enough). */
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
p[0]=(unsigned char) (quantum >> 24);
p[1]=(unsigned char) (quantum >> 16);
p[2]=(unsigned char) (quantum >> 8);
p[3]=(unsigned char) quantum;
}
/*
  WriteTo8BimProfile() keeps the image's "8bim" resource block in sync with
  a named profile: the 8BIM record whose id matches `name` (icc/iptc/xmp) is
  replaced with `profile`, or deleted when `profile` is NULL.  Profiles with
  any other name are ignored, as is an image without an "8bim" entry.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
register const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
/* Map the profile name onto its Photoshop resource id. */
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
/* Walk the resource block record by record; q remembers the start of the
   current record so a delete can keep everything before it. */
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
/* Skip the Pascal-style name; records are padded to even length. */
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
/* Payloads are padded to even length as well. */
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) ||
(count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_count;
StringInfo
*extract_profile;
extract_count=0;
/* extent = bytes following the matched record's payload. */
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
/* Delete: keep [datum, q) plus everything after the record. */
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) CopyMagickMemory(extract_profile->datum,datum,offset);
}
else
{
/* Replace: keep the record header, rewrite its 4-byte length field,
   then splice in the (even-padded) new payload. */
offset=(p-datum);
extract_count=profile->length;
if ((extract_count & 0x01) != 0)
extract_count++;
extract_profile=AcquireStringInfo(offset+extract_count+extent);
(void) CopyMagickMemory(extract_profile->datum,datum,offset-4);
WriteResourceLong(extract_profile->datum+offset-4,
(unsigned int)profile->length);
(void) CopyMagickMemory(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) CopyMagickMemory(extract_profile->datum+offset+extract_count,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
/*
  GetProfilesFromResourceBlock() walks an 8BIM (Photoshop) resource block
  and lifts embedded data out into first-class image state: resolution
  (id 0x03ed) into image->resolution/units, and iptc/icc/exif/xmp payloads
  into named image profiles via SetImageProfileInternal (recursive=MagickTrue
  so the extracted profiles are not written back into the 8BIM wrapper).
*/
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block,ExceptionInfo *exception)
{
const unsigned char
*datum;
register const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
/* Skip the Pascal-style resource name, padded to even length. */
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
(count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
NOTE(review): this case advances p by the 16 fixed bytes it parses
rather than by count -- presumably count is always 16 here; confirm.
*/
p=ReadResourceLong(p,&resolution);
image->resolution.x=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->resolution.y=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
/* Payloads are padded to even length. */
if ((count & 0x01) != 0)
p++;
}
}
/*
  SetImageProfileInternal() installs (or replaces) a clone of `profile`
  under the lower-cased `name` in the image's profile splay tree, creating
  the tree on first use.  On success it cross-propagates: an "8bim" profile
  is scanned for embedded icc/iptc/exif/xmp payloads, while any other name
  is mirrored into the 8BIM wrapper unless `recursive` is set (which breaks
  the mutual-recursion cycle with GetProfilesFromResourceBlock).
  Returns the AddValueToSplayTree() status.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive,
ExceptionInfo *exception)
{
char
key[MagickPathExtent],
property[MagickPathExtent];
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Lazily create the profile container. */
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MagickPathExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),CloneStringInfo(profile));
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,profile,exception);
else
if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,profile);
}
/*
Inject profile into image properties.
*/
(void) FormatLocaleString(property,MagickPathExtent,"%s:*",name);
(void) GetImageProperty(image,property,exception);
return(status);
}
/* Public entry point: install a named profile without 8BIM recursion. */
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=SetImageProfileInternal(image,name,profile,MagickFalse,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Consume one byte from the (*p, *length) cursor; returns EOF when the
   stream is exhausted, otherwise the byte value and advances the cursor. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
byte;
if (*length == 0)
return(EOF);
byte=(int) **p;
(*p)++;
(*length)--;
return(byte);
}
/* Decode a 16-bit value at `buffer` honoring `endian`, reinterpreting the
   bit pattern as signed via a union (avoids implementation-defined casts). */
static inline signed short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned short
value;
if (endian == LSBEndian)
value=(unsigned short) (((unsigned short) buffer[1] << 8) |
(unsigned short) buffer[0]);
else
value=(unsigned short) (((unsigned short) buffer[0] << 8) |
(unsigned short) buffer[1]);
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
/* Decode a 32-bit value at `buffer` honoring `endian`, reinterpreting the
   bit pattern as signed via a union. */
static inline signed int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned int
value;
if (endian == LSBEndian)
value=((unsigned int) buffer[3] << 24) | ((unsigned int) buffer[2] << 16) |
((unsigned int) buffer[1] << 8) | (unsigned int) buffer[0];
else
value=((unsigned int) buffer[0] << 24) | ((unsigned int) buffer[1] << 16) |
((unsigned int) buffer[2] << 8) | (unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
/* Bounds-checked big-endian 32-bit read from the (*p, *length) cursor.
   Returns 0 and leaves the cursor untouched when fewer than 4 bytes remain. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
signed int
result;
result=0;
if (*length >= 4)
{
result=ReadProfileLong(MSBEndian,*p);
*p+=4;
(*length)-=4;
}
return(result);
}
/* Bounds-checked big-endian 16-bit read from the (*p, *length) cursor.
   Returns 0 and leaves the cursor untouched when fewer than 2 bytes remain. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
signed short
result;
result=0;
if (*length >= 2)
{
result=ReadProfileShort(MSBEndian,*p);
*p+=2;
(*length)-=2;
}
return(result);
}
/* Store the low 32 bits of `value` at p in the requested byte order
   (exactly 4 bytes written). */
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
if (endian == LSBEndian)
{
p[0]=(unsigned char) value;
p[1]=(unsigned char) (value >> 8);
p[2]=(unsigned char) (value >> 16);
p[3]=(unsigned char) (value >> 24);
}
else
{
p[0]=(unsigned char) (value >> 24);
p[1]=(unsigned char) (value >> 16);
p[2]=(unsigned char) (value >> 8);
p[3]=(unsigned char) value;
}
}
/* Store a 16-bit value at p in the requested byte order
   (exactly 2 bytes written). */
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
if (endian == LSBEndian)
{
p[0]=(unsigned char) value;
p[1]=(unsigned char) (value >> 8);
}
else
{
p[0]=(unsigned char) (value >> 8);
p[1]=(unsigned char) value;
}
}
/*
  Sync8BimProfile() rewrites the resolution resource (id 0x03ED, 16 bytes:
  x-res fixed 16.16, x-unit, ..., y-res, y-unit) inside an 8BIM profile so
  it matches image->resolution and image->units.  Returns MagickFalse when
  the profile is truncated or a length field is inconsistent.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
while (length != 0)
{
/* Scan byte-by-byte for the "8BIM" signature. */
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
/* Pascal-string resource-name length. */
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
/* NOTE(review): p advances here without decrementing length, so the two
   cursors drift apart; the *p below may read one byte past the remaining
   data when count == length.  Verify against the upstream fix. */
p+=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
/* Resolutions are stored in the block as pixels-per-inch, fixed 16.16. */
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.x*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.x*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.y*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian, (unsigned int) (image->resolution.y*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
/*
  SyncExifProfile() walks the TIFF/IFD structure of an EXIF profile and
  overwrites the resolution (0x011a/0x011b), orientation (0x0112) and
  resolution-unit (0x0128) entries in place with the image's current values.
  Sub-IFDs reached through TAG_EXIF_OFFSET / TAG_INTEROP_OFFSET are visited
  via a small explicit stack.  Returns MagickFalse when no valid TIFF header
  can be located or the profile is too short; MagickTrue otherwise.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
/* No TIFF byte-order mark up front: scan for an "Exif\0\0" preamble. */
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
/* "II" => little-endian, "MM" => big-endian. */
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
/* 0x002a is the TIFF magic number. */
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || (size_t) offset >= length)
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
do
{
/* Resume a directory previously pushed on the stack. */
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
/* Each IFD entry is 12 bytes: tag, format, count, value/offset. */
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
/* NOTE(review): a 32-bit read is cast through ssize_t into an int
   here -- presumably safe for well-formed files; confirm. */
components=(ssize_t) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
/* XResolution: rational = resolution / 1. */
(void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
/* YResolution: rational = resolution / 1. */
(void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
/* Orientation (short or long depending on the entry's format). */
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
/* ResolutionUnit: EXIF enumeration is ImageMagick units + 1. */
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
/* Push the remainder of this IFD, the sub-IFD, and (if present) the
   next chained IFD, then restart the outer loop. */
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
return(MagickTrue);
}
/*
  SyncImageProfiles() pushes the image's current resolution/orientation
  into its 8BIM and EXIF profiles.  Returns MagickFalse if either sync
  step reports a malformed profile.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if ((profile != (StringInfo *) NULL) &&
(Sync8BimProfile(image,profile) == MagickFalse))
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if ((profile != (StringInfo *) NULL) &&
(SyncExifProfile(image,profile) == MagickFalse))
status=MagickFalse;
return(status);
}
|
1.norace10.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
/*
  OpenMP race-checker fixture: the outer i-loop is parallel while the serial
  dependence runs only along j within a row (A[i][j] reads A[i][j-1]), so
  distinct iterations of the parallel loop touch disjoint rows -- the
  expected verdict is "Data Race Free" (see the CHECK line below).
  NOTE(review): A is read uninitialized (column 0); harmless for the race
  analysis this test drives, but the values are indeterminate.
*/
int main() {
int A[N][N];
#pragma omp parallel for schedule(guided, 4)
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i][j - 1];
}
// CHECK: Region is Data Race Free.
// END
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <signal.h>
#include <hdf5.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "allvars.h"
#include "proto.h"
MyIDType *IdSnapTable;
MyIDType *tmpptr;
int *int_tmpptr;
long int *long_int_tmpptr;
long long int *long_long_int_tmpptr;
void get_TotNumPart(void);
/*
  L-BaseTree driver: links subhaloes between two snapshot catalogues.
  Usage: L-BaseTree <parameterfile> <outputnum>.  Pipeline: load group and
  subhalo catalogues A and B, translate particle IDs through IdSnapTable,
  build ID->halo index tables, determine descendants in both directions,
  keep only mutual (A<->B) links, then write the descendant list.
*/
int main(int argc, char **argv)
{
if(argc != 3)
{
printf("\n  usage: L-BaseTree <parameterfile> <outputnum>\n");
printf("  <parameterfile>    see readparmeterfile.c\n");
printf("  <outputnum>        snapshot number\n\n");
exit(1);
}
read_parameter_file(argv[1]);
SnapshotNum = atoi(argv[2]);
#if defined(_OPENMP)
printf("OMP: max-threads=%d\n", omp_get_max_threads());
fflush(stdout);
#endif
/*==========================================================================*/
printf("allocating group catalogues...\n"); fflush(stdout);
allocate_group_catalogue(SnapshotNum, &CatA, 1);
allocate_group_catalogue(SnapshotNum, &CatB, 2);
printf("populating group catalogues...\n"); fflush(stdout);
load_group_catalogue(SnapshotNum, &CatA, 1);
load_group_catalogue(SnapshotNum, &CatB, 2);
/*==========================================================================*/
/*==========================================================================*/
printf("allocating subhalo catalogues...\n"); fflush(stdout);
allocate_subhalo_catalogue(SnapshotNum, &CatA, 1);
allocate_subhalo_catalogue(SnapshotNum, &CatB, 2);
printf("populating subhalo catalogues...\n"); fflush(stdout);
load_subhalo_catalogue(SnapshotNum, &CatA, 1);
load_subhalo_catalogue(SnapshotNum, &CatB, 2);
/*==========================================================================*/
/*==========================================================================*/
printf("reading/sorting IDs...\n"); fflush(stdout);
get_id_translation_table(); /* Load IdSnapTable: sorted array of length N_dm with minimum (first) value of Min(ID_dm) and a maximum (last) value of Max(ID_dm) */
printf("reassigning ids ...\n"); fflush(stdout);
reassign_ids(CatA.TotNids, CatA.IdList);
reassign_ids(CatB.TotNids, CatB.IdList);
/* IdSnapTable is only needed for the translation above. */
myfree(IdSnapTable);
printf("done.\n"); fflush(stdout);
/*==========================================================================*/
/*==========================================================================*/
/* set cat->IdToHalo[i] such that each particle can reference the Halo that it is a part of */
printf("preparing ID-to-halo tables...\n"); fflush(stdout);
prepare_index_list(&CatA);
printf("index A done.\n"); fflush(stdout);
prepare_index_list(&CatB);
printf("index B done.\n"); fflush(stdout);
/*==========================================================================*/
/*==========================================================================*/
/* get descendants */
printf("determine_descendants...\n"); fflush(stdout);
determine_descendants(&CatA, &CatB);
printf("desc AB done.\n"); fflush(stdout);
determine_descendants(&CatB, &CatA);
printf("desc BA done.\n");
fflush(stdout);
printf("descendants done.\n"); fflush(stdout);
/*==========================================================================*/
/*==========================================================================*/
/* Keep only mutual A<->B matches. */
printf("Doing Backward decision ...\n");
decide_backwards(&CatA, &CatB);
printf("Backward decision for AB done.\n");
fflush(stdout);
/*==========================================================================*/
printf("saving descendants...\n"); fflush(stdout);
save_decendant_list();
printf("saving done.\n"); fflush(stdout);
/*==========================================================================*/
delete_id_translation_table();
return 0;
}
/*
  Keep a descendant link A[i] -> B[j] only when it is mutual, i.e. when
  catB's best match for subhalo j points back at subhalo i; otherwise the
  link is severed (-1).
*/
void decide_backwards(struct halo_catalogue *catA, struct halo_catalogue *catB)
{
int a, b;
for(a = 0; a < catA->TotNsubhalos; a++)
{
b = catA->Descendant[a].HaloIndex;
if(b > -1 && catB->Descendant[b].HaloIndex != a)
catA->Descendant[a].HaloIndex = -1;
}
fflush(stdout);
}
/* One candidate descendant for a particle: the index of the halo (in the
   other catalogue) that contains it, plus the rank-based weight the
   particle contributes to that candidate. */
struct cand_data
{
int haloindex;
float weight;
};
/*
  determine_descendants() finds, for every subhalo in catA, the catB halo
  that receives the largest rank-weighted share of its particles, and
  stores it in catA->Descendant[i].HaloIndex (-1 when no candidate).
  Particles are weighted by 1/(rank+1)^ALPHA so the most-bound particles
  dominate the vote.  Parallelized over catA subhaloes with OpenMP; each
  thread owns private candlist/difflist scratch buffers.
*/
void determine_descendants(struct halo_catalogue *catA, struct halo_catalogue *catB)
{
int i, j, ndiff, ncand, haloB, prev, maxlen;
MyIDType id;
float weightmax;
int halomax;
struct cand_data *candlist, *difflist;
/* Size the scratch buffers for the largest subhalo. */
maxlen = 0;
for(i = 0; i < catA->TotNsubhalos; i++)
if(catA->SubLen[i] > maxlen)
maxlen = catA->SubLen[i];
#if defined(_OPENMP)
#pragma omp parallel private(candlist, difflist, ncand, i, j, id, haloB, ndiff, prev, weightmax, halomax)
#endif
{
candlist = mymalloc(maxlen * sizeof(struct cand_data));
difflist = mymalloc(maxlen * sizeof(struct cand_data));
#if defined(_OPENMP)
#pragma omp for schedule(dynamic) nowait
#endif
for(i = 0; i < catA->TotNsubhalos; i++) // for each subhalo in Snapshot A ...
{
ncand = 0;
for(j = 0; j < catA->SubLen[i]; j++) // ... and for each particle in each subhalo
{
id = catA->IdList[catA->SubOffset[i] + j]; // ... identify the particle's ID
if(id >= 0 && id < TotNumPart) // ... (and as long as it's in the acceptable range)
{
haloB = catB->IdToHalo[id]; // ... identify the halo that contains this particle in snapshot B
if(haloB >= 0) // all particles are in haloes (they have -1), but if it is in a halo...
{
candlist[ncand].haloindex = haloB; // ... set the haloindex accordingly
candlist[ncand].weight = 1.0 / pow(j + 1, ALPHA); // ... and set the weighting based on how bound it was
ncand++;
}
}
else
{
char buf[100];
long_to_str(buf, id);
printf("bummer! i=%d id=%s TotumPart=%d\n", i, buf, (int)TotNumPart);
exit(4);
}
}
/* Group candidates by halo index, then sum weights per distinct halo. */
qsort(candlist, ncand, sizeof(struct cand_data), sort_candlist);
for(j = 0, ndiff = 0, prev = -1; j < ncand; j++)
{
if(candlist[j].haloindex != prev)
{
ndiff++;
difflist[ndiff - 1].haloindex = candlist[j].haloindex;
difflist[ndiff - 1].weight = 0;
}
difflist[ndiff - 1].weight += candlist[j].weight;
prev = candlist[j].haloindex;
}
/* The candidate with the largest total weight wins. */
weightmax = 0;
halomax = -1;
for(j = 0; j < ndiff; j++)
{
if(difflist[j].weight > weightmax)
{
weightmax = difflist[j].weight;
halomax = difflist[j].haloindex;
}
}
if(ndiff > 0 && halomax >= 0)
{
catA->Descendant[i].HaloIndex = halomax;
}
else
{
catA->Descendant[i].HaloIndex= -1;
}
}
myfree(candlist);
myfree(difflist);
}
}
int sort_twoids_id(const void *a, const void *b)
{
if(((struct twoids *) a)->id < ((struct twoids *) b)->id)
return -1;
if(((struct twoids *) a)->id > ((struct twoids *) b)->id)
return +1;
return 0;
}
int sort_twoids_ord(const void *a, const void *b)
{
if(((struct twoids *) a)->ord < ((struct twoids *) b)->ord)
return -1;
if(((struct twoids *) a)->ord > ((struct twoids *) b)->ord)
return +1;
return 0;
}
int sort_candlist(const void *a, const void *b)
{
if(((struct cand_data *) a)->haloindex < ((struct cand_data *) b)->haloindex)
return -1;
if(((struct cand_data *) a)->haloindex > ((struct cand_data *) b)->haloindex)
return +1;
return 0;
}
int sort_IDType(const void *a, const void *b)
{
if(*((MyIDType *) a) < *((MyIDType *) b))
return -1;
if(*((MyIDType *) a) > *((MyIDType *) b))
return +1;
return 0;
}
/*
  prepare_index_list() builds cat->IdToHalo, an inverse lookup of length
  TotNumPart mapping particle ID -> containing subhalo index (-1 when the
  particle is in no subhalo).  Aborts if any listed ID is out of range.
  NOTE(review): the second parallel loop writes IdToHalo[id] from multiple
  threads -- race-free only if no particle ID appears in more than one
  subhalo (presumably guaranteed by SUBFIND's disjoint ID lists); confirm.
*/
void prepare_index_list(struct halo_catalogue *cat)
{
MyIDType id;
signed long long ii;
int i, j;
cat->IdToHalo = mymalloc(sizeof(int) * TotNumPart);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for(ii = 0; ii < TotNumPart; ii++) // start by assigning all particles to no halo
cat->IdToHalo[ii] = -1;
#if defined(_OPENMP)
#pragma omp parallel for private(j,id)
#endif
for(i = 0; i < cat->TotNsubhalos; i++) // loop over all subhalos
for(j = 0; j < cat->SubLen[i]; j++) // loop over all particles in each subhalo
{
id = cat->IdList[cat->SubOffset[i] + j]; // id from the subhalo list
if(id >= 0 && id < TotNumPart)
cat->IdToHalo[id] = i;
else
{
char buf[100];
long_to_str(buf, id);
printf("bummer! i=%d j=%d id=%s id=%d TotNumPart=%d)\n", i, j, buf, (int)id, (int)TotNumPart);
exit(1);
}
}
}
/*
  allocate_group_catalogue() reads the header of the first subfind tab file
  for snapshot `num` (catalogue 1 or 2 selects OutputDir1/OutputDir2) to
  learn cat->TotNgroups, then allocates the per-group arrays and the
  6-species length/offset tables, and zeroes every group's subhalo counter.
*/
void allocate_group_catalogue(int num, struct halo_catalogue *cat, int which)
{
int nids, nFiles, nsubhalos, ngroups;
char buf[1000];
if (which==1)
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir1, num, num, 0);
else
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir2, num, num, 0);
/* Header read populates cat->TotNgroups (and the other totals). */
read_basic_subfind_header_hdf5(buf, 0, cat, &nFiles , &nids , &nsubhalos, &ngroups);
int jjj;
cat->GroupNsubs = mymalloc(sizeof(MyIDType) * cat->TotNgroups);
cat->GroupLen = mymalloc(sizeof(MyIDType) * cat->TotNgroups);
cat->GroupOffset = mymalloc(sizeof(MyIDType) * cat->TotNgroups);
cat->GroupLenType = mymalloc(6 * sizeof(MyIDType *));
cat->GroupOffsetType = mymalloc(6 * sizeof(MyIDType *));
cat->SubhaloLenType = mymalloc(6 * sizeof(MyIDType *));
cat->Group = mymalloc(sizeof(struct group_data) * cat->TotNgroups);
/* One length/offset array per particle species (0..5). */
for(jjj=0 ; jjj< 6; jjj++)
{
cat->GroupLenType[jjj] = mymalloc(cat->TotNgroups * sizeof(MyIDType));
cat->GroupOffsetType[jjj] = mymalloc(cat->TotNgroups * sizeof(MyIDType));
}
for(jjj=0; jjj< cat->TotNgroups; jjj++)
cat->Group[jjj].count = 0;
}
/*
  load_group_catalogue() fills the arrays allocated by
  allocate_group_catalogue() by reading every subfind tab file of the
  snapshot.  nFiles is seeded to 1 and then overwritten by each header read,
  so the loop bound adjusts to the true file count after the first pass.
  Group subhalo counters are re-zeroed afterwards for the subhalo pass.
*/
void load_group_catalogue(int num, struct halo_catalogue *cat, int which)
{
int i=0, nids, nFiles, nsubhalos, ngroups, groupcount;
char buf[1000];
if (which==1)
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir1, num, num, i);
else
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir2, num, num, i);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
groupcount = 0;
printf("starting the group loading loop\n"); fflush(stdout);
/* nFiles is updated inside the loop by each header read. */
for(i = 0, nFiles = 1; i < nFiles; i++)
{
if (which==1)
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir1, num, num, i);
else
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir2, num, num, i);
if(i == 1)
printf(" ... to ... \n");
if(i == 0 || i == nFiles-1)
printf("Loading : %s\n",buf);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
if(ngroups > 0)
read_subfind_group_hdf5(buf, i, cat, ngroups, groupcount);
groupcount += ngroups;
}
/* Reset per-group subhalo counters; load_subhalo_catalogue rebuilds them. */
for(i=0 ; i < cat->TotNgroups ; i++)
cat->Group[i].count = 0;
printf("finished the group loading loop\n"); fflush(stdout);
}
/*
  allocate_subhalo_catalogue() reads the first subfind tab header to learn
  cat->TotNsubhalos, allocates the per-subhalo arrays, initializes every
  descendant link to -1, and gives each group with subhaloes a Subhalo
  array sized by GroupNsubs (so group data must be loaded first).
  NOTE(review): SubhaloLenType is re-allocated here after
  allocate_group_catalogue() already allocated it -- looks like a small
  one-time leak of the earlier 6-pointer block; confirm intent.
*/
void allocate_subhalo_catalogue(int num, struct halo_catalogue *cat, int which)
{
int nids, nFiles, nsubhalos, ngroups;
char buf[1000];
if (which==1)
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir1, num, num, 0);
else
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir2, num, num, 0);
read_basic_subfind_header_hdf5(buf, 0, cat, &nFiles , &nids , &nsubhalos, &ngroups);
int iii,jjj, i;
cat->SubLen = mymalloc(sizeof(int) * cat->TotNsubhalos);
cat->SubParentHalo = mymalloc(sizeof(int) * cat->TotNsubhalos);
cat->SubOffset = mymalloc(sizeof(MyIDType) * cat->TotNsubhalos);
cat->Descendant = mymalloc(sizeof(struct descendant_data) * cat->TotNsubhalos);
cat->SubhaloGrNr = mymalloc(sizeof(MyIDType) * cat->TotNsubhalos);
cat->SubhaloLen = mymalloc(sizeof(MyIDType) * cat->TotNsubhalos);
cat->SubhaloLenType = mymalloc(6 * sizeof(MyIDType *));
/* No descendant assigned yet. */
for (i=0; i < cat->TotNsubhalos; i++)
cat->Descendant[i].HaloIndex=-1;
for(jjj=0 ; jjj< 6; jjj++)
cat->SubhaloLenType[jjj] = mymalloc(cat->TotNsubhalos * sizeof(MyIDType));
for(iii=0 ; iii < cat->TotNgroups ; iii++)
if(cat->GroupNsubs[iii] > 0)
cat->Group[iii].Subhalo = mymalloc(sizeof(struct subhalo_data) * cat->GroupNsubs[iii]);
}
// Load the full subhalo catalogue of snapshot `num` into `cat`.
// `which` selects the simulation: 1 -> OutputDir1/SnapshotFileBase1,
// otherwise OutputDir2/SnapshotFileBase2.
// Steps:
//   1) read per-file subhalo blocks from the fof_subhalo_tab files,
//   2) regroup subhaloes under their parent FOF groups,
//   3) read all DM (type 1) particle IDs from the snapshot files and build
//      cat->IdList ordered group-by-group, subhalo-by-subhalo.
// Assumes the cat->Group / GroupOffsetType / SubhaloGrNr arrays were
// allocated by the preceding header-reading pass.
void load_subhalo_catalogue(int num, struct halo_catalogue *cat, int which)
{
int i=0, nids, nFiles, nsubhalos, subcount, ngroups;
char buf[1000];
MyIDType * local_id_array, ndm = 0 , Nskip = 0;
// read file 0 once up-front to obtain catalogue-wide totals (TotNsubhalos etc.)
if (which==1)
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir1, num, num, i);
else
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir2, num, num, i);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
subcount = 0;
printf("starting the subhalo loading loop\n"); fflush(stdout);
// nFiles starts at 1 and is updated by the header read of each file,
// so the loop bound grows to the true file count after the first pass
for(i = 0, nFiles = 1; i < nFiles; i++)
{
if (which==1)
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir1, num, num, i);
else
sprintf(buf, "%s/groups_%03d/fof_subhalo_tab_%03d.%d.hdf5", OutputDir2, num, num, i);
if(i == 1)
printf(" ... to ... \n");
if(i == 0 || i == nFiles-1)
printf("Loading : %s\n",buf);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
if(nsubhalos > 0)
read_subfind_subhalo_hdf5(buf, i, cat, nsubhalos, subcount);
subcount += nsubhalos;
}
// NOTE(review): buf is overwritten below before being printed, so this
// long_to_str result appears unused -- confirm before removing.
long_to_str(buf, cat->TotNids);
printf("finished the subhalo loading loop\n"); fflush(stdout);
int iii,jjj,j;
long int subfind_dm_ids=0;
// regroup: append each subhalo's lengths to its parent group's Subhalo array,
// using Group[].count as the running insertion cursor
for(i=0 ; i < cat->TotNsubhalos ; i++)
{
cat->Group[cat->SubhaloGrNr[i]].Subhalo[cat->Group[cat->SubhaloGrNr[i]].count].SubhaloLen = cat->SubhaloLen[i];
for(j=0 ; j < 6 ; j++)
cat->Group[cat->SubhaloGrNr[i]].Subhalo[cat->Group[cat->SubhaloGrNr[i]].count].SubhaloLenType[j] = cat->SubhaloLenType[j][i];
cat->Group[cat->SubhaloGrNr[i]].count = cat->Group[cat->SubhaloGrNr[i]].count + 1;
}
// total number of DM (type 1) particles bound in subhaloes
for(iii = 0; iii < cat->TotNgroups; iii++) // for each group
for(jjj = 0; jjj < cat->Group[iii].count ; jjj++) // and each subhalo within the group
subfind_dm_ids += cat->Group[iii].Subhalo[jjj].SubhaloLenType[1];
cat->TotNids = subfind_dm_ids;
cat->IdList = mymalloc( subfind_dm_ids * sizeof(MyIDType));
// read the snapshot header of file 0 to size the DM id buffer
i = 0;
if (which==1)
sprintf(buf, "%s/snapdir_%03d/%s_%03d.%d.hdf5", OutputDir1, num,SnapshotFileBase1, num, i); // change 1
else
sprintf(buf, "%s/snapdir_%03d/%s_%03d.%d.hdf5", OutputDir2, num,SnapshotFileBase2, num, i); // change 1
read_snap_header_attributes_in_hdf5(buf);
// total DM count, combining low and high 32-bit words
ndm = header.npartTotal[1]+ ((long long) header.npartTotalHighWord[1] << 32);
local_id_array = mymalloc(ndm * sizeof(MyIDType));
// NOTE(review): nFiles here is the count learned from the subhalo tab files;
// assumed equal to the snapshot's header.num_files -- confirm.
for(i = 0; i < nFiles; i++)
{
if (which==1)
sprintf(buf, "%s/snapdir_%03d/%s_%03d.%d.hdf5", OutputDir1, num, SnapshotFileBase1, num, i);
else
sprintf(buf, "%s/snapdir_%03d/%s_%03d.%d.hdf5", OutputDir2, num, SnapshotFileBase2, num, i);
if(i == 0 || i == nFiles-1)
printf(" and : %s\n",buf);
read_snap_header_attributes_in_hdf5(buf);
read_particle_ids_in_hdf5(buf, 1, local_id_array, Nskip); // loads all dm particle ids
Nskip += header.npart[1];
}
int k;
MyIDType local_idcount=0, local_galaxycount=0;
i = j = k = 0;
printf("starting the assignment loop\n"); fflush(stdout);
MyIDType cumulative_subhalo_offset = 0, local_offset = 0;
// copy each subhalo's DM ids from the raw snapshot order into cat->IdList,
// recording per-group (GroupOffset) and per-subhalo (SubOffset) offsets
for(i = 0; i < cat->TotNgroups; i++) // for each group
{
cat->GroupOffset[i] = cumulative_subhalo_offset;
local_offset = 0;
for(j = 0; j < cat->Group[i].count ; j++) // and each subhalo within the group
{
for(k = 0; k < cat->Group[i].Subhalo[j].SubhaloLenType[1] ; k++) // and each DM particle within the subhalo
{
cat->IdList[local_idcount] = local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]; // can't trust this group offset
local_idcount++;
#ifdef VERBOSE
#ifdef LONGIDS
if (i < 2 && j < 2 && k < 2)
{
// NOTE(review): local_offset is MyIDType but printed with %d -- confirm format
printf("cat->GroupOffsetType[1][i] = %lu, local_offset = %d, k = %d, local_id_array[%lu] = %llu\n",
cat->GroupOffsetType[1][i], local_offset, k, cat->GroupOffsetType[1][i] + local_offset + k , local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]);
printf("Group %d, Subhalo %d, Particle %d, ID = %llu\n",i,j,k,local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]);
}
#else
if (i < 10 && j < 10 && k < 10)
printf("Group %d, Subhalo %d, Particle %d, ID = %d\n",i,j,k,local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]);
#endif
#endif
}
cat->SubOffset[local_galaxycount] = cumulative_subhalo_offset;
cat->SubLen[local_galaxycount] = cat->Group[i].Subhalo[j].SubhaloLenType[1];
cumulative_subhalo_offset += cat->Group[i].Subhalo[j].SubhaloLenType[1];
local_offset += cat->Group[i].Subhalo[j].SubhaloLenType[1];
local_galaxycount++;
}
#ifdef VERBOSE
if(i < 10)
{
printf("First ID of Group %d can be indexed as:\n",i);
printf(" local_id_array[cat->GroupOffsetType[1][%d]] = %llu where cat->GroupOffsetType[1][%d] = %llu\n",
i,local_id_array[cat->GroupOffsetType[1][i]],i,cat->GroupOffsetType[1][i]);
printf(" cat->IdList[cat->GroupOffset[%d]] = %llu where cat->GroupOffset[%d] = %llu\n\n",
i,cat->IdList[cat->GroupOffset[i]], i, cat->GroupOffset[i]);
}
#endif
}
printf("finishing the assignment loop\n"); fflush(stdout);
myfree(local_id_array);
}
// Write a 1-D int dataset of length n named `name` into `file`.
// Uses the HDF5 1.6-style H5Dcreate signature, consistent with this file.
static void write_int_dataset(hid_t file, const char *name, const int *data, int n)
{
hsize_t dims[1];
dims[0] = n;
hid_t hdf5_dataspace_in_file = H5Screate_simple(1, dims, NULL);
hid_t hdf5_dataspace_memory = H5Screate_simple(1, dims, NULL);
hid_t hdf5_datatype = H5Tcopy(H5T_NATIVE_INT);
hid_t hdf5_dataset = H5Dcreate(file, name, hdf5_datatype, hdf5_dataspace_in_file, H5P_DEFAULT);
H5Dwrite(hdf5_dataset, hdf5_datatype, hdf5_dataspace_memory, hdf5_dataspace_in_file, H5P_DEFAULT, data);
H5Sclose(hdf5_dataspace_memory);
H5Dclose(hdf5_dataset);
H5Sclose(hdf5_dataspace_in_file);
H5Tclose(hdf5_datatype);
}
// Save the subhalo match (descendant) list to
// <MatchOutputDir>/subhalo_match_<SnapshotNum>.hdf5.
// The file carries an int attribute NSubhaloMatches, two string attributes
// recording the catalogue paths, and two int datasets:
//   SubhaloIndexFrom - indices (in CatA) of matched subhaloes
//   SubhaloIndexTo   - their descendant indices in the target catalogue
// Aborts (exit) if the recount of matches disagrees with the first count.
void save_decendant_list(void)
{
int i, *data;
char buf[1000];
// count matches first so the datasets can be sized exactly
int NSubhaloMatches = 0, checkcount;
for (i = 0; i < CatA.TotNsubhalos; i++)
if (CatA.Descendant[i].HaloIndex != -1)
NSubhaloMatches++;
sprintf(buf, "%s/subhalo_match_%03d.hdf5", MatchOutputDir, SnapshotNum);
hid_t file = H5Fcreate(buf, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
// scalar attribute: number of matched subhaloes
hid_t hdf5_dataspace = H5Screate(H5S_SCALAR);
hid_t hdf5_attribute = H5Acreate(file, "NSubhaloMatches", H5T_NATIVE_INT, hdf5_dataspace, H5P_DEFAULT);
H5Awrite(hdf5_attribute, H5T_NATIVE_INT, &NSubhaloMatches);
H5Aclose(hdf5_attribute);
// string attributes with the two catalogue paths
// NOTE(review): H5Tset_size(strlen(...)) stores the string without its NUL
// terminator -- confirm downstream readers rely on the stored size only.
hid_t atype = H5Tcopy(H5T_C_S1);
H5Tset_size(atype, strlen(OutputDir1));
hdf5_attribute = H5Acreate(file, "SubhaloPathFrom", atype, hdf5_dataspace, H5P_DEFAULT);
H5Awrite(hdf5_attribute, atype, &OutputDir1);
H5Aclose(hdf5_attribute);
H5Tset_size(atype, strlen(OutputDir2));
hdf5_attribute = H5Acreate(file, "SubhaloPathTo", atype, hdf5_dataspace, H5P_DEFAULT);
H5Awrite(hdf5_attribute, atype, &OutputDir2);
H5Aclose(hdf5_attribute);
H5Tclose(atype);
H5Sclose(hdf5_dataspace);
data = mymalloc(sizeof(int) * NSubhaloMatches);
// dataset 1: source indices of matched subhaloes
for (checkcount = 0, i = 0; i < CatA.TotNsubhalos; i++)
if (CatA.Descendant[i].HaloIndex != -1)
{
data[checkcount++] = i;
}
if (checkcount != NSubhaloMatches)
{
printf("BAD\n");
exit(-1);
}
write_int_dataset(file, "SubhaloIndexFrom", data, NSubhaloMatches);
// dataset 2: descendant indices for the same subhaloes, same order
for (checkcount = 0, i = 0; i < CatA.TotNsubhalos; i++)
if (CatA.Descendant[i].HaloIndex != -1)
{
data[checkcount++] = CatA.Descendant[i].HaloIndex;
}
if (checkcount != NSubhaloMatches)
{
printf("BAD\n");
exit(-1);
}
write_int_dataset(file, "SubhaloIndexTo", data, NSubhaloMatches);
H5Fclose(file);
myfree(data);
}
// Remove the cached sorted-ID table for the current snapshot, forcing
// get_id_translation_table() to rebuild it on the next run.
void delete_id_translation_table(void)
{
char buf[1000];
// snprintf instead of sprintf: MatchOutputDir is externally configured, so
// bound the write to the buffer (truncation is harmless for unlink)
snprintf(buf, sizeof buf, "%s/sorted_id_table_%03d.hdf5", MatchOutputDir, SnapshotNum);
unlink(buf);
}
// Build (or load from cache) IdSnapTable: the sorted list of all DM (type 1)
// particle IDs in snapshot SnapshotNum.  If MatchOutputDir already contains
// sorted_id_table_###.hdf5 it is read directly; otherwise the IDs are
// gathered from every snapshot file, sorted ascending, and written out.
// Side effects: allocates the global IdSnapTable and sets global TotNumPart.
void get_id_translation_table(void)
{
FILE *fd;
char buf[1000], buf2[1000], bufA[100], bufB[100];
int filenr, numfiles;
MyIDType i, minID, maxID, Nskip = 0;
printf("reading IDs from last snapshot\n");
fflush(stdout);
sprintf(buf, "%s/sorted_id_table_%03d.hdf5", MatchOutputDir, SnapshotNum);
if((fd = fopen(buf, "r")))
{
// fast path: a cached, already-sorted table exists on disk
fclose(fd);
printf("ok, I'm reading '%s'\n", buf);
fflush(stdout);
read_num_part_table_hdf5(buf, &TotNumPart );
IdSnapTable = mymalloc(TotNumPart * sizeof(MyIDType));
read_id_translation_table_hdf5(buf, TotNumPart, IdSnapTable );
printf("TotNumPart = %llu \n",TotNumPart);
fflush(stdout);
printf("finished reading sorted id table!\n");
fflush(stdout);
}
else
{
// slow path: gather IDs from every snapshot file, then sort and cache
numfiles = 1;
for(filenr = 0; filenr < numfiles; filenr++)
{
if(filenr == 0)
printf("Starting to read...\n");
sprintf(buf, "%s/%s_%03d.hdf5", OutputDir1, SnapshotFileBase1, SnapshotNum);
sprintf(buf2, "%s/snapdir_%03d/%s_%03d.%d.hdf5", OutputDir1, SnapshotNum, SnapshotFileBase1,
SnapshotNum, filenr);
printf(" %s\n",buf2);
read_snap_header_attributes_in_hdf5(buf2);
if(filenr == 0)
{
// first file: learn the file count and the DM total, allocate the table
numfiles = header.num_files;
TotNumPart =
//header.npartTotal[0] + (((long long) header.npartTotalHighWord[0]) << (long long) 32) +
header.npartTotal[1] + (((long long) header.npartTotalHighWord[1]) << (long long) 32);
//header.npartTotal[4] + (((long long) header.npartTotalHighWord[4]) << (long long) 32);
long_to_str(bufA, TotNumPart);
#ifdef VERBOSE
printf("Allocating IdSnapTable...\n");
printf(" header.npartTotal[0] = %d\n",header.npartTotal[0]);
printf(" header.npartTotal[1] = %d\n",header.npartTotal[1]);
printf(" header.npartTotal[4] = %d\n",header.npartTotal[4]);
printf(" TotNumPart = %llu\n\n",TotNumPart);
#endif
IdSnapTable = mymalloc(TotNumPart * sizeof(MyIDType));
}
int parttype;
parttype = 1;
read_particle_ids_in_hdf5(buf2, parttype , IdSnapTable , Nskip);
Nskip += header.npart[parttype];
#ifdef VERBOSE
if(filenr == 0)
{
printf("\n\n Check that ids are being loaded properly...\n");
// BUGFIX: the format string contained %s but no argument was passed
// (undefined behavior) -- print the file name buf2
printf(" First 10 DM particle ids in %s are:\n", buf2);
int id_check;
// NOTE(review): index uses header.npart[0] although parttype 1 was
// loaded -- confirm the intended offset into IdSnapTable
for(id_check=0 ; id_check < 10 ; id_check ++)
#ifdef LONGIDS
printf(" ID[%d] = %llu\n",id_check,IdSnapTable[id_check+Nskip - header.npart[0]]);
#else
printf(" ID[%d] = %d\n",id_check,IdSnapTable[id_check+Nskip - header.npart[0]]);
#endif // LONGIDS
}
#endif // VERBOSE
}
printf("TotNumPart=%s\n", bufA);
printf("IDs read.\n");
fflush(stdout);
// min/max scan is diagnostic only
for(i = 1, minID = maxID = IdSnapTable[0]; i < TotNumPart; i++)
{
if(minID > IdSnapTable[i])
minID = IdSnapTable[i];
if(maxID < IdSnapTable[i])
maxID = IdSnapTable[i];
}
long_to_str(bufA, minID);
long_to_str(bufB, maxID);
printf("min-ID=%s max-ID=%s\n", bufA, bufB);
printf("sorting IDs\n");
fflush(stdout);
// Nskip is the number of IDs actually loaded; it should equal TotNumPart
qsort(IdSnapTable, Nskip, sizeof(MyIDType), sort_IDType);
printf("sorting done\n");
fflush(stdout);
printf("writing sorted id table...\n");
fflush(stdout);
write_id_translation_table_hdf5(IdSnapTable, TotNumPart);
}
}
// Translate particle IDs in-place into their ranks within the globally sorted
// IdSnapTable: each id becomes its index in that table (0 .. N_dm-1).
// Parallelized by giving each OpenMP thread one contiguous chunk of `ids`;
// the last thread absorbs the division remainder.  Within a chunk the ids are
// sorted, merged against the sorted IdSnapTable in a single linear pass,
// then restored to their original order.  Aborts if an id is not found.
void reassign_ids(MyIDType N, MyIDType * ids)
{
long long i, j, offset, NN;
#if defined(_OPENMP)
int tid;
int nthreads;
#endif
struct twoids *TwoIDs;
printf("reassign IDs...\n");
fflush(stdout);
#if defined(_OPENMP)
#pragma omp parallel private(tid, nthreads, offset, NN, i, j, TwoIDs) shared(IdSnapTable)
#endif
{
#if defined(_OPENMP)
tid = omp_get_thread_num();
nthreads = omp_get_max_threads();
offset = tid * (N / nthreads);
NN = (N / nthreads);
if(nthreads > 1 && tid == (nthreads - 1))
{
// last thread also handles the remainder of the division
NN = N - offset;
}
#else
NN = N;
offset = 0;
#endif
TwoIDs = mymalloc(NN * sizeof(struct twoids));
for(i = 0; i < NN; i++) // load all ids into the TwoID array
{
TwoIDs[i].id = ids[i + offset]; // the ids at each location
TwoIDs[i].ord = i; // the index at each location
}
qsort(TwoIDs, NN, sizeof(struct twoids), sort_twoids_id); // sort them by id! -> Min id first
/* now assign: advance j through the sorted table for each sorted id */
j = 0;
for(i = 0; i < NN; i++)
{
// check the bound first for clarity; stops when IdSnapTable[j] >= TwoIDs[i].id
while(j < (TotNumPart - 1) && IdSnapTable[j] < TwoIDs[i].id)
j++;
if(IdSnapTable[j] != TwoIDs[i].id) // no match anywhere in the table:
{ // a subfind-catalogue particle is missing from the snapshot (IdSnapTable)
printf("ID mismatch found?\n");
// BUGFIX: i and j are signed long long -- print with %lld, and cast the
// id values explicitly so the format matches regardless of MyIDType
printf("IdSnapTable[%lld] = %llu TwoIDs[%lld].id = %llu TotNumPart = %llu \n",
j, (unsigned long long) IdSnapTable[j], i, (unsigned long long) TwoIDs[i].id, (unsigned long long) TotNumPart);
exit(1);
}
else
TwoIDs[i].id = j; // THIS IS THE KEY POINT -- THE NEW ID IS THE INDEX IN IdSnapTable!!! min=0; max=N_dm
}
/* sort back */
qsort(TwoIDs, NN, sizeof(struct twoids), sort_twoids_ord); // Sort them by orig order -> old first entry is again first
for(i = 0; i < NN; i++)
ids[i + offset] = TwoIDs[i].id; // repackage them back into the original array
myfree(TwoIDs);
}
printf("done\n");
fflush(stdout);
}
// Format a 64-bit count as a decimal string into s.
// The previous version split the value into two int-sized %d groups, which
// overflows `int` for n >= ~2.1e18 (n / 1000000000 no longer fits in int);
// printing with %lld is exact for the full long long range and produces
// identical output for all previously-valid inputs.
void long_to_str(char *s, long long n)
{
sprintf(s, "%lld", n);
}
|
LAGraph_BF_full1.c | //------------------------------------------------------------------------------
// LAGraph_BF_full1.c: Bellman-Ford single-source shortest paths, returns tree,
// while diagonal of input matrix A needs not to be explicit 0
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_BF_full1: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full1 performs a Bellman-Ford to find out shortest path, parent
// nodes along the path and the hops (number of edges) in the path from given
// source vertex s in the range of [0, n) on graph given as matrix A with size
// n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i
// to vertex j with weight w, then A(i, j) = w.
// TODO: think about the return values
// LAGraph_BF_full1 returns GrB_SUCCESS if it succeeds. In this case, there
// are no negative-weight cycles in the graph, and d, pi, and h are returned.
// The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1,
// where p is the parent node of k-th node in the shortest path. In particular,
// pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest
// path.
// If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the
// GrB_Vectors d(k), pi(k) and h(k) (i.e., *pd_output, *ppi_output and
// *ph_output respectively) will be NULL when negative-weight cycle detected.
// Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and
// so on, can be returned, if these errors are found by the underlying
// GrB_* functions.
//------------------------------------------------------------------------------
#include "BF_test.h"
// Free every workspace object created by LAGraph_BF_full1.  GrB_free and
// LAGRAPH_FREE are no-ops on already-NULL handles, so this macro is safe to
// expand at any point after the declarations.
#define LAGRAPH_FREE_WORK \
{ \
GrB_free(&d); \
GrB_free(&dmasked); \
GrB_free(&dless); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_LT_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGRAPH_FREE (I); \
LAGRAPH_FREE (J); \
LAGRAPH_FREE (w); \
LAGRAPH_FREE (W); \
LAGRAPH_FREE (h); \
LAGRAPH_FREE (pi); \
}
// Free workspace plus the three output vectors; used on error paths where
// the outputs must not be returned to the caller.
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE_WORK \
GrB_free (pd_output); \
GrB_free (ppi_output); \
GrB_free (ph_output); \
}
//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
// Entry type for the adjacency matrix and distance vector: a path is
// summarized by its total weight, its hop count, and its penultimate vertex.
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
// z = min(x, y) under the lexicographic order on (w, h, pi): prefer the
// smaller weight, then the fewer hops, then the smaller penultimate vertex.
void BF_lMIN
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    bool x_wins =
        (x->w < y->w) ||
        (x->w == y->w && x->h < y->h) ||
        (x->w == y->w && x->h == y->h && x->pi < y->pi) ;
    if (x_wins)
    {
        // skip the self-assignment when z aliases x
        if (z != x) { *z = *x ; }
    }
    else
    {
        *z = *y ;
    }
}
// z = x (+) y: concatenate two paths by summing weights and hop counts.
// The penultimate vertex comes from y, except when x is the "no path"
// element (pi == UINT64_MAX) or y is the trivial self-path (pi == 0),
// in which case x's pi is kept.
void BF_PLUSrhs
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    z->w = x->w + y->w ;
    z->h = x->h + y->h ;
    bool take_rhs = (x->pi != UINT64_MAX) && (y->pi != 0) ;
    z->pi = take_rhs ? y->pi : x->pi ;
}
// Identity unary operator on Tuple3: copy x into z unchanged.
void BF_Identity
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x
)
{
    BF_Tuple3_struct t = *x ;
    *z = t ;
}
// *z = (x < y) under the same lexicographic (w, h, pi) order used by BF_lMIN.
void BF_LT
(
    bool *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    *z = (x->w < y->w)
      || (x->w == y->w && x->h < y->h)
      || (x->w == y->w && x->h == y->h && x->pi < y->pi) ;
}
// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negtive-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has weights on corresponding entries of edges
// s is given index for source vertex
// Bellman-Ford SSSP over the lexicographic (weight, hops, parent) semiring.
// On success returns GrB_SUCCESS and allocates *pd_output (distances),
// *ppi_output (parents, 1-based; 0 at the source) and *ph_output (hop
// counts).  Returns GrB_NO_VALUE with all three outputs NULL when a
// negative-weight cycle is reachable from s.
GrB_Info LAGraph_BF_full1
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dmasked = NULL, dless = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_UnaryOp BF_Identity_Tuple3;
GrB_BinaryOp BF_LT_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF_Tuple3_struct *W = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
*pd_output = NULL;
*ppi_output = NULL;
*ph_output = NULL;
LAGr_Matrix_nrows (&nrows, A) ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nvals (&nz, A);
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
// NOTE(review): GrB_Index is unsigned, so the `s < 0` clause can never fire
if (s >= n || s < 0)
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGRAPH_OK (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct)));
// GrB_BinaryOp
LAGr_UnaryOp_new(&BF_Identity_Tuple3,
(void*) (&BF_Identity), BF_Tuple3, BF_Tuple3);
LAGr_BinaryOp_new(&BF_LT_Tuple3,
(LAGraph_binary_function) (&BF_LT), GrB_BOOL, BF_Tuple3, BF_Tuple3);
LAGr_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3);
LAGr_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF_PLUSrhs),
BF_Tuple3, BF_Tuple3, BF_Tuple3);
// GrB_Monoid: identity is the "no path" tuple <inf, inf, inf>
BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring: (lexicographic min, path concatenation)
LAGr_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3);
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
I = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_malloc (nz, sizeof(double)) ;
W = LAGraph_malloc (nz, sizeof(BF_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, while its entries become BF_Tuple3 type
//--------------------------------------------------------------------------
LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads = LAGraph_get_nthreads ( ) ;
printf ("nthreads %d\n", nthreads) ;
// each edge (i,j,w) becomes the tuple <w, 1 hop, parent i+1>
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
LAGr_Matrix_new(&Atmp, BF_Tuple3, n, n);
LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
LAGRAPH_FREE (I);
LAGRAPH_FREE (J);
LAGRAPH_FREE (W);
LAGRAPH_FREE (w);
//--------------------------------------------------------------------------
// create and initialize "distance" vector d, dmasked and dless
//--------------------------------------------------------------------------
LAGr_Vector_new(&d, BF_Tuple3, n);
// make d dense
LAGRAPH_OK(GrB_Vector_assign_UDT(d, NULL, NULL, (void*)&BF_identity,
GrB_ALL, n, NULL));
// initial distance from s to itself
BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s));
// creat dmasked as a sparse vector with only one entry at s
LAGr_Vector_new(&dmasked, BF_Tuple3, n);
LAGRAPH_OK(GrB_Vector_setElement_UDT(dmasked, &d0, s));
// create dless
LAGr_Vector_new(&dless, GrB_BOOL, n);
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
bool any_dless= true; // if there is any newly found shortest path
int64_t iter = 0; // number of iterations
// terminate when no new path is found or more than V-1 loops
// (n >= 1 here since s < n was verified, so n - 1 cannot wrap)
while (any_dless && iter < n - 1)
{
// execute semiring on d and A, and save the result to dtmp
LAGr_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL);
// dless = d .< dtmp
//LAGRAPH_OK (GrB_Vector_clear(dless));
LAGr_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d,
NULL);
// if there is no entry with smaller distance then all shortest paths
// are found
LAGr_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless,
NULL) ;
if(any_dless)
{
// update all entries with smaller distances
LAGr_apply(d, dless, NULL, BF_Identity_Tuple3, dmasked, NULL);
// only use entries that were just updated
LAGr_Vector_clear(dmasked);
LAGr_apply(dmasked, dless, NULL, BF_Identity_Tuple3, d, NULL);
//try:
//LAGr_assign(dmasked, dless, NULL, d, GrB_ALL, n, GrB_DESC_R);
}
iter ++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (any_dless)
{
// execute semiring again to check for negative-weight cycle
LAGr_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL);
// dless = d .< dtmp
//LAGRAPH_OK (GrB_Vector_clear(dless));
LAGr_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL);
// if there is no entry with smaller distance then all shortest paths
// are found
LAGr_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless, NULL) ;
if(any_dless)
{
// a distance still improved after V-1 relaxations: negative cycle
// printf("A negative-weight cycle found. \n");
LAGRAPH_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
I = LAGraph_malloc (n, sizeof(GrB_Index)) ;
W = LAGraph_malloc (n, sizeof(BF_Tuple3_struct)) ;
w = LAGraph_malloc (n, sizeof(double)) ;
h = LAGraph_malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_malloc (n, sizeof(GrB_Index)) ;
if (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
// split the tuple entries into three parallel scalar arrays
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d));
for (GrB_Index k = 0; k < n; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGr_Vector_new(pd_output, GrB_FP64, n);
LAGr_Vector_new(ppi_output, GrB_UINT64, n);
LAGr_Vector_new(ph_output, GrB_UINT64, n);
LAGr_Vector_build (*pd_output , I, w , n, GrB_MIN_FP64 );
LAGr_Vector_build (*ppi_output, I, pi, n, GrB_MIN_UINT64);
LAGr_Vector_build (*ph_output , I, h , n, GrB_MIN_UINT64);
LAGRAPH_FREE_WORK;
return (GrB_SUCCESS) ;
}
|
bitmap.h | #ifndef XGBOOST_UTILS_BITMAP_H_
#define XGBOOST_UTILS_BITMAP_H_
/*!
* \file bitmap.h
* \brief a simple implement of bitmap
* NOTE: bitmap is only threadsafe per word access, remember this when using bitmap
* \author Tianqi Chen
*/
#include <vector>
#include "./utils.h"
#include "./omp.h"
namespace xgboost {
namespace utils {
/*! \brief bit map that contains set of bit indicators */
/*! \brief bit map that contains set of bit indicators */
struct BitMap {
  /*! \brief internal data structure: one uint32_t word per 32 bits */
  std::vector<uint32_t> data;
  /*!
   * \brief resize the bitmap to be certain size
   * \param size the size of bitmap, in bits
   */
  inline void Resize(size_t size) {
    data.resize((size + 31U) >> 5, 0);
  }
  /*!
   * \brief query the i-th position of bitmap
   * \param i the position in the bitmap
   */
  inline bool Get(size_t i) const {
    return (data[i >> 5] >> (i & 31U)) & 1U;
  }
  /*!
   * \brief set i-th position to true
   * \param i position index
   */
  inline void SetTrue(size_t i) {
    // BUGFIX: use an unsigned literal -- (1 << 31) on a signed int is
    // undefined behavior when i % 32 == 31
    data[i >> 5] |= (1U << (i & 31U));
  }
  /*! \brief initialize the value of bit map from vector of int (0/1 flags) */
  inline void InitFromBool(const std::vector<int> &vec) {
    this->Resize(vec.size());
    // parallel over the full 32-bit words
    bst_omp_uint nsize = static_cast<bst_omp_uint>(vec.size() / 32);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint i = 0; i < nsize; ++i) {
      uint32_t res = 0;
      for (int k = 0; k < 32; ++k) {
        // BUGFIX: build the word in unsigned arithmetic -- shifting an int
        // `bit` left by 31 is undefined behavior when bit == 1
        uint32_t bit = vec[(i << 5) | k] != 0;
        res |= (bit << k);
      }
      data[i] = res;
    }
    // handle the trailing partial word serially.
    // BUGFIX: the previous version compared `nsize` (a word count) with
    // vec.size() (a bit count) and started the tail loop at bit `nsize`;
    // for sizes that are exact multiples of 32 (e.g. 32) it cleared the
    // last word and never restored bit 0.
    size_t tail_start = static_cast<size_t>(nsize) << 5;
    if (tail_start != vec.size()) {
      // clear only the partial word before OR-ing in the tail bits, so a
      // reused bitmap does not keep stale bits there
      data.back() = 0;
      for (size_t i = tail_start; i < vec.size(); ++i) {
        if (vec[i]) this->SetTrue(i);
      }
    }
  }
  /*! \brief clear the bitmap, set all places to false */
  inline void Clear(void) {
    std::fill(data.begin(), data.end(), 0U);
  }
};
} // namespace utils
} // namespace xgboost
#endif
|
private-clauseModificado2.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Didactic OpenMP example (modified "private clause" demo): `suma` is shared
// by default and updated without synchronization inside the worksharing loop,
// so this program intentionally exhibits a data race -- the per-thread and
// final totals can vary between runs.  A correct version would use
// `reduction(+:suma)` or give each thread a private accumulator.
int main(){
int i, n = 7;
int a[n], suma = 0;
// fill a with 0..n-1 so the correct serial total would be n*(n-1)/2
for (i=0; i<n; i++)
a[i] = i;
#pragma omp parallel
{
//suma = 0;
#pragma omp for
for (i=0; i<n; i++){
// racy read-modify-write on the shared `suma`
suma = suma + a[i];
printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
}
// each thread reports whatever (racy) running total it observes
printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
}
printf("\n");
}
|
lloyds_par8.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <stdbool.h>
#include <omp.h>
#include "csvparser.h"
// Set the first `length` entries of `a` to zero.
void vector_init(double *a, int length) {
    int idx = 0;
    while (idx < length) {
        a[idx] = 0.0;
        ++idx;
    }
}
// Copy `length` doubles from `src` into `dst`, element by element in
// ascending index order.
void vector_copy(double *dst, double *src, int length) {
    int idx = 0;
    while (idx < length) {
        dst[idx] = src[idx];
        ++idx;
    }
}
// Element-wise sum: dst[i] = a[i] + b[i] for i in [0, length).
void vector_add(double *dst, double *a, double *b, int length) {
    int idx = 0;
    while (idx < length) {
        dst[idx] = a[idx] + b[idx];
        ++idx;
    }
}
// Divide each of the first `length` entries of `a` by `denominator`,
// storing the result in `dst`.  The caller is responsible for ensuring
// `denominator` is nonzero.
void vector_elementwise_avg(double *dst, double *a, int denominator, int length) {
    int idx = 0;
    while (idx < length) {
        dst[idx] = a[idx] / denominator;
        ++idx;
    }
}
// Program should take K, a data set (.csv), a delimiter,
// a binary flag data_contains_header, and a binary flag to drop labels
// K-means clustering (Lloyd's algorithm), OpenMP version using 8 threads.
// Usage: prog K data.csv delimiter has_header_row drop_labels [fixed_centers]
// Reads the CSV into a dense matrix, picks K distinct random rows as initial
// centers (or a fixed debug set when a 6th argument is given), then iterates
// assign/update steps until no point changes cluster.
int main(int argc, char *argv[]){
// Seed for consistent cluster center selection
// In a working implementation, seeding would be variable (e.g. time(NULL))
srand(111);
CsvParser *reader;
CsvRow *row;
int i,j;
if(argc < 6){
printf("Incorrect number of args. Should be 5, received %d\n", argc - 1);
exit(1);
}
int K = atoi(argv[1]);
char *data_fp = argv[2];
char *delimiter = argv[3];
int has_header_row = atoi(argv[4]);
int drop_labels = atoi(argv[5]);
// Take in data set
reader = CsvParser_new(data_fp, delimiter, has_header_row);
// Get number of columns
row = CsvParser_getRow(reader);
int num_cols = CsvParser_getNumFields(row);
CsvParser_destroy_row(row);
if (drop_labels){
num_cols--;
}
// Get number of rows like lazy people
int num_rows = 1;
while ((row = CsvParser_getRow(reader))){
num_rows++;
CsvParser_destroy_row(row);
}
// Torch the CsvParser and start again so we can read data in.
CsvParser_destroy(reader);
reader = CsvParser_new(data_fp, delimiter, has_header_row);
double **data_matrix = malloc(num_rows * sizeof(double *));
for (int i = 0; i < num_rows; i++) {
data_matrix[i] = malloc(num_cols * sizeof(double));
}
int row_index = 0;
while ((row = CsvParser_getRow(reader))){
const char **row_fields = CsvParser_getFields(row);
for (int col_index = 0; col_index < num_cols; col_index++) {
data_matrix[row_index][col_index] = atof(row_fields[col_index]);
}
CsvParser_destroy_row(row);
row_index++;
}
CsvParser_destroy(reader);
// Initialize some cluster centers from random rows in our data
// Given the fact that we will usually have way more rows than centers, we can
// probably just roll a number and reroll if we already rolled it. Collisions
// should be relatively infrequent
bool collided;
double centers[K][num_cols];
if (argc == 7) {
// debug path with fixed centers
// NOTE(review): this array has exactly 3 entries, so this path assumes
// K <= 3 -- indexing beyond that is out of bounds; confirm intent.
int center_indices[3] = {12, 67, 106};
for (i = 0; i < K; i ++) {
vector_copy(centers[i], data_matrix[center_indices[i]], num_cols);
}
} else {
// BUGFIX: center_indices must persist across iterations of i; it was
// previously declared inside the loop, so the duplicate check below
// compared against indeterminate values (undefined behavior).
int center_indices[K];
for (i = 0; i < K; i++) {
collided = true;
while (collided) {
center_indices[i] = rand() % num_rows;
collided = false;
for (j = 0; j < i; j++) {
if (center_indices[j] == center_indices[i]) {
collided = true;
break;
}
}
vector_copy(centers[i], data_matrix[center_indices[i]], num_cols);
}
}
}
printf("Initial cluster centers:\n");
for (int i = 0; i < K; i++) {
for (int j = 0; j < num_cols; j++) {
printf("%f ", centers[i][j]);
}
printf("\n");
}
printf("\n");
int num_iterations = 0;
int *clusterings = calloc(num_rows, sizeof(int));
bool changes;
double tstart = omp_get_wtime();
while (1) {
// Assign points to cluster centers
changes = false;
omp_set_num_threads(8);
int center, observation, new_center, col;
double idx_diff, current_diff, best_diff;
#pragma omp parallel for \
private(center, observation, idx_diff, current_diff, best_diff, new_center, col) \
shared(num_rows, K, data_matrix, centers)
for (observation = 0; observation < num_rows; observation++) {
best_diff = INFINITY;
for (center = 0; center < K; center++) {
// squared Euclidean distance to this center
current_diff = 0;
for (col = 0; col < num_cols; col++) {
idx_diff = data_matrix[observation][col] - centers[center][col];
current_diff += idx_diff * idx_diff;
}
if (current_diff < best_diff) {
best_diff = current_diff;
new_center = center;
}
}
if (clusterings[observation] != new_center) {
// NOTE: There is an acceptable data race on changes. Threads only ever
// set it to true; lost updates are inconsequential. No need to slow
// things down for safety.
changes = true;
clusterings[observation] = new_center;
}
}
// If we didn't change any cluster assignments, we're at convergence
if (!changes) {
break;
}
num_iterations++;
// Find cluster means and reassign centers
int cluster_index, element, elements_in_cluster;
double cluster_means[num_cols];
#pragma omp parallel for \
private(cluster_index, element, elements_in_cluster, cluster_means) \
shared(num_rows, clusterings, data_matrix, K)
for (cluster_index = 0; cluster_index < K; cluster_index++) {
elements_in_cluster = 0;
vector_init(cluster_means, num_cols);
// Aggregate in-cluster values we can use to take the clusterings mean
for (element = 0; element < num_rows; element++) {
if (clusterings[element] == cluster_index) {
vector_add(cluster_means, cluster_means, data_matrix[element], num_cols);
elements_in_cluster++;
}
}
// Finish calculating cluster mean, and overwrite centers with the new value
// NOTE(review): an empty cluster gives elements_in_cluster == 0 and a
// division by zero (NaN center) -- confirm clusters can never empty out.
vector_elementwise_avg(cluster_means, cluster_means, elements_in_cluster, num_cols);
vector_copy(centers[cluster_index], cluster_means, num_cols);
}
}
double tend = omp_get_wtime();
printf("\nFinal cluster centers:\n");
for (int i = 0; i < K; i++) {
for (int j = 0; j < num_cols; j++) {
printf("%f ", centers[i][j]);
}
printf("\n");
}
printf("\nNum iterations: %d\n", num_iterations);
printf("Time taken for %d clusters: %f seconds\n", K, tend - tstart);
for (int i = 0; i < num_rows; i++) {
free(data_matrix[i]);
}
free(data_matrix);
free(clusterings);
exit(0);
}
|
GB_unop__identity_int8_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fp32)
// op(A') function: GB (_unop_tran__identity_int8_fp32)
// C type: int8_t
// A type: float
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the built-in IDENTITY unary operator entrywise, with a typecast
// from float to int8_t:  Cx [p] = (int8_t) Ax [p] for all p.  Cx and Ax may
// be aliased.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), so the caller falls back to the generic method.
GrB_Info GB (_unop_apply__identity_int8_fp32)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // # of entries to process (bitmap size if Ab != NULL)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse, hypersparse, or full case: every position p holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a single parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A into C, applying the identity operator
// with a float -> int8_t typecast to each entry.  All of the work is done by
// the shared template GB_unop_transpose.c, parameterized by the GB_* macros
// defined above.  Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_unop_tran__identity_int8_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // used by GB_unop_transpose.c
    const int64_t *restrict A_slice,    // used by GB_unop_transpose.c
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_reduce_panel.c | //------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed. A must be sparse, hypersparse,
// or full (it cannot be bitmap). A cannot have any zombies. If A has zombies
// or is bitmap, GB_reduce_to_scalar_template is used instead.
// Template body: s = reduce (A) to a scalar, using a panel of GB_PANEL
// partial results to expose instruction-level parallelism.  The monoid, its
// types, and the optional terminal value are supplied by the GB_* macros of
// the including file.
{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    ASSERT (!A->iso) ;
    int64_t anz = GB_nnz (A) ;
    ASSERT (anz > 0) ;
    ASSERT (!GB_IS_BITMAP (A)) ;
    ASSERT (A->nzombies == 0) ;

    #if GB_IS_ANY_MONOID

    // the ANY monoid can take any entry, and terminate immediately
    s = Ax [anz-1] ;

    #else

    //--------------------------------------------------------------------------
    // reduce A to a scalar
    //--------------------------------------------------------------------------

    if (nthreads == 1)
    {

        //----------------------------------------------------------------------
        // load the Panel with the first entries
        //----------------------------------------------------------------------

        GB_ATYPE Panel [GB_PANEL] ;
        int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
        for (int64_t k = 0 ; k < first_panel_size ; k++)
        {
            Panel [k] = Ax [k] ;
        }

        #if GB_HAS_TERMINAL
        // countdown until the next early-termination check
        int panel_count = 0 ;
        #endif

        //----------------------------------------------------------------------
        // reduce all entries to the Panel
        //----------------------------------------------------------------------

        for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
        {
            if (p + GB_PANEL > anz)
            {
                // last partial panel
                for (int64_t k = 0 ; k < anz-p ; k++)
                {
                    // Panel [k] = op (Panel [k], Ax [p+k]) ;
                    GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                }
            }
            else
            {
                // whole panel
                for (int64_t k = 0 ; k < GB_PANEL ; k++)
                {
                    // Panel [k] = op (Panel [k], Ax [p+k]) ;
                    GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                }
                #if GB_HAS_TERMINAL
                panel_count-- ;
                if (panel_count <= 0)
                {
                    // check for early exit only every 256 panels
                    panel_count = 256 ;
                    int count = 0 ;
                    for (int64_t k = 0 ; k < GB_PANEL ; k++)
                    {
                        count += (Panel [k] == GB_TERMINAL_VALUE) ;
                    }
                    if (count > 0)
                    {
                        // a terminal value has been reached; stop scanning
                        break ;
                    }
                }
                #endif
            }
        }

        //----------------------------------------------------------------------
        // s = reduce (Panel)
        //----------------------------------------------------------------------

        s = Panel [0] ;
        for (int64_t k = 1 ; k < first_panel_size ; k++)
        {
            // s = op (s, Panel [k]) ;
            GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // all tasks share a single early_exit flag
        //----------------------------------------------------------------------

        // If this flag gets set, all tasks can terminate early

        #if GB_HAS_TERMINAL
        bool early_exit = false ;
        #endif

        //----------------------------------------------------------------------
        // each thread reduces its own slice in parallel
        //----------------------------------------------------------------------

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the work for this task
            //------------------------------------------------------------------

            // Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]

            int64_t pstart, pend ;
            GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
            GB_ATYPE t = Ax [pstart] ;

            //------------------------------------------------------------------
            // skip this task if the terminal value has already been reached
            //------------------------------------------------------------------

            #if GB_HAS_TERMINAL
            // check if another task has called for an early exit
            // (atomic read pairs with the atomic write below)
            bool my_exit ;
            GB_ATOMIC_READ
            my_exit = early_exit ;
            if (!my_exit)
            #endif

            //------------------------------------------------------------------
            // do the reductions for this task
            //------------------------------------------------------------------

            {

                //--------------------------------------------------------------
                // load the Panel with the first entries
                //--------------------------------------------------------------

                GB_ATYPE Panel [GB_PANEL] ;
                int64_t my_anz = pend - pstart ;
                int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
                for (int64_t k = 0 ; k < first_panel_size ; k++)
                {
                    Panel [k] = Ax [pstart + k] ;
                }

                #if GB_HAS_TERMINAL
                // countdown until the next early-termination check
                int panel_count = 0 ;
                #endif

                //--------------------------------------------------------------
                // reduce all entries to the Panel
                //--------------------------------------------------------------

                for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
                {
                    if (p + GB_PANEL > pend)
                    {
                        // last partial panel
                        for (int64_t k = 0 ; k < pend-p ; k++)
                        {
                            // Panel [k] = op (Panel [k], Ax [p+k]) ;
                            GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                        }
                    }
                    else
                    {
                        // whole panel
                        for (int64_t k = 0 ; k < GB_PANEL ; k++)
                        {
                            // Panel [k] = op (Panel [k], Ax [p+k]) ;
                            GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                        }
                        #if GB_HAS_TERMINAL
                        panel_count-- ;
                        if (panel_count <= 0)
                        {
                            // check for early exit only every 256 panels
                            panel_count = 256 ;
                            int count = 0 ;
                            for (int64_t k = 0 ; k < GB_PANEL ; k++)
                            {
                                count += (Panel [k] == GB_TERMINAL_VALUE) ;
                            }
                            if (count > 0)
                            {
                                break ;
                            }
                        }
                        #endif
                    }
                }

                //--------------------------------------------------------------
                // t = reduce (Panel)
                //--------------------------------------------------------------

                t = Panel [0] ;
                for (int64_t k = 1 ; k < first_panel_size ; k++)
                {
                    // t = op (t, Panel [k]) ;
                    GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
                }

                #if GB_HAS_TERMINAL
                if (t == GB_TERMINAL_VALUE)
                {
                    // tell all other tasks to exit early
                    GB_ATOMIC_WRITE
                    early_exit = true ;
                }
                #endif
            }

            //------------------------------------------------------------------
            // save the results of this task
            //------------------------------------------------------------------

            W [tid] = t ;
        }

        //----------------------------------------------------------------------
        // sum up the results of each slice using a single thread
        //----------------------------------------------------------------------

        s = W [0] ;
        for (int tid = 1 ; tid < ntasks ; tid++)
        {
            // s = op (s, W [tid]), no typecast
            GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
        }
    }
    #endif
}
|
convolution_3x3_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform the 3x3 int8 convolution kernel into the Winograd F(4,3)
// transform domain (6x6 per input/output channel pair), then interleave it
// into the pack8-to-1 layout consumed by conv3x3s1_winograd42_pack8to1_int8_msa.
// The packed layout interleaves 8 input channels, grouped 4 output channels
// at a time (with a scalar tail for outch % 4).
static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_msa(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // G matrix of the Winograd F(4,3) kernel transform, scaled to integers
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h : tmp = G * g  (transform the 3 kernel rows)
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : kernel_tm = tmp * G^T  (6x6 transformed kernel tile)
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);

    // main loop: pack 4 output channels together, 8 input channels interleaved
    int p = 0;
    for (; p + 3 < outch; p += 4)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);
            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00[1] = k1.row<const short>(q + i)[k];
                    g00[2] = k2.row<const short>(q + i)[k];
                    g00[3] = k3.row<const short>(q + i)[k];
                    g00 += 4;
                }
            }
        }
    }
    // tail: remaining output channels packed one at a time
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);
            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00 += 1;
                }
            }
        }
    }
}
// Winograd F(4,3) 3x3 stride-1 int8 convolution, MIPS MSA version.
// Input is pack8 (8 channels interleaved), output is pack1.  Three stages:
// 1) transform each 6x6 input tile into the Winograd domain,
// 2) per-tile matrix multiply against the packed transformed kernel,
// 3) transform the 6x6 accumulator tiles back to 4x4 output tiles.
// The transformed kernel must come from
// conv3x3s1_winograd42_transform_kernel_pack8to1_int8_msa.
static void conv3x3s1_winograd42_pack8to1_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    // size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            short tmp[6][6][8];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;

                    // vertical pass: widen int8 -> int16 and apply the
                    // transform down the 6 rows of the tile
                    for (int m = 0; m < 6; m++)
                    {
                        v16i8 _r00_01 = __msa_ld_b(r0, 0);
                        v16i8 _r02_03 = __msa_ld_b(r0 + 16, 0);
                        v16i8 _r04_05 = __msa_ld_b(r0 + 32, 0);
                        v16i8 _extr0001 = __msa_clti_s_b(_r00_01, 0);
                        v16i8 _extr0203 = __msa_clti_s_b(_r02_03, 0);
                        v16i8 _extr0405 = __msa_clti_s_b(_r04_05, 0);
                        v8i16 _r00 = (v8i16)__msa_ilvr_b(_extr0001, _r00_01);
                        v8i16 _r01 = (v8i16)__msa_ilvl_b(_extr0001, _r00_01);
                        v8i16 _r02 = (v8i16)__msa_ilvr_b(_extr0203, _r02_03);
                        v8i16 _r03 = (v8i16)__msa_ilvl_b(_extr0203, _r02_03);
                        v8i16 _r04 = (v8i16)__msa_ilvr_b(_extr0405, _r04_05);
                        v8i16 _r05 = (v8i16)__msa_ilvl_b(_extr0405, _r04_05);

                        v8i16 _v5 = __msa_fill_h(5);

                        v8i16 _tmp0m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r00, 2), _r04), __msa_mulv_h(_r02, _v5));
                        v8i16 _tmp1m = __msa_subv_h(__msa_addv_h(_r04, _r03), __msa_slli_h(__msa_addv_h(_r01, _r02), 2));
                        v8i16 _tmp2m = __msa_addv_h(__msa_subv_h(_r04, _r03), __msa_slli_h(__msa_subv_h(_r01, _r02), 2));
                        v8i16 _tmp3m = __msa_subv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
                        v8i16 _tmp4m = __msa_addv_h(__msa_subv_h(_r04, _r02), __msa_slli_h(__msa_subv_h(_r01, _r03), 1));
                        v8i16 _tmp5m = __msa_subv_h(__msa_addv_h(__msa_slli_h(_r01, 2), _r05), __msa_mulv_h(_r03, _v5));

                        __msa_st_h(_tmp0m, tmp[0][m], 0);
                        __msa_st_h(_tmp1m, tmp[1][m], 0);
                        __msa_st_h(_tmp2m, tmp[2][m], 0);
                        __msa_st_h(_tmp3m, tmp[3][m], 0);
                        __msa_st_h(_tmp4m, tmp[4][m], 0);
                        __msa_st_h(_tmp5m, tmp[5][m], 0);

                        r0 += w * 8;
                    }

                    short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
                    short* r0_tm_1 = r0_tm_0 + tiles * 8;
                    short* r0_tm_2 = r0_tm_0 + tiles * 16;
                    short* r0_tm_3 = r0_tm_0 + tiles * 24;
                    short* r0_tm_4 = r0_tm_0 + tiles * 32;
                    short* r0_tm_5 = r0_tm_0 + tiles * 40;

                    // horizontal pass: same transform across the 6 columns,
                    // scattering results into the 36 transform planes
                    for (int m = 0; m < 6; m++)
                    {
                        v8i16 _tmp00 = __msa_ld_h(tmp[m][0], 0);
                        v8i16 _tmp01 = __msa_ld_h(tmp[m][1], 0);
                        v8i16 _tmp02 = __msa_ld_h(tmp[m][2], 0);
                        v8i16 _tmp03 = __msa_ld_h(tmp[m][3], 0);
                        v8i16 _tmp04 = __msa_ld_h(tmp[m][4], 0);
                        v8i16 _tmp05 = __msa_ld_h(tmp[m][5], 0);

                        v8i16 _v5 = __msa_fill_h(5);

                        v8i16 _r0tm0 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp00, 2), _tmp04), __msa_mulv_h(_tmp02, _v5));
                        v8i16 _r0tm1 = __msa_subv_h(__msa_addv_h(_tmp04, _tmp03), __msa_slli_h(__msa_addv_h(_tmp01, _tmp02), 2));
                        v8i16 _r0tm2 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp03), __msa_slli_h(__msa_subv_h(_tmp01, _tmp02), 2));
                        v8i16 _r0tm3 = __msa_subv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
                        v8i16 _r0tm4 = __msa_addv_h(__msa_subv_h(_tmp04, _tmp02), __msa_slli_h(__msa_subv_h(_tmp01, _tmp03), 1));
                        v8i16 _r0tm5 = __msa_subv_h(__msa_addv_h(__msa_slli_h(_tmp01, 2), _tmp05), __msa_mulv_h(_tmp03, _v5));

                        __msa_st_h(_r0tm0, r0_tm_0, 0);
                        __msa_st_h(_r0tm1, r0_tm_1, 0);
                        __msa_st_h(_r0tm2, r0_tm_2, 0);
                        __msa_st_h(_r0tm3, r0_tm_3, 0);
                        __msa_st_h(_r0tm4, r0_tm_4, 0);
                        __msa_st_h(_r0tm5, r0_tm_5, 0);

                        r0_tm_0 += tiles * 48;
                        r0_tm_1 += tiles * 48;
                        r0_tm_2 += tiles * 48;
                        r0_tm_3 += tiles * 48;
                        r0_tm_4 += tiles * 48;
                        r0_tm_5 += tiles * 48;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile: gather tiles in pairs so the GEMM below can consume
            // two tiles per iteration
            int i = 0;
            for (; i + 1 < tiles; i += 2)
            {
                short* tmpptr = tm2.row<short>(i / 2);

                const short* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    v8i16 _r0 = __msa_ld_h(r0, 0);
                    v8i16 _r1 = __msa_ld_h(r0 + 8, 0);
                    __msa_st_h(_r0, tmpptr, 0);
                    __msa_st_h(_r1, tmpptr + 8, 0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 16;
                }
            }
            for (; i < tiles; i++)
            {
                short* tmpptr = tm2.row<short>(i / 2 + i % 2);

                const short* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    v8i16 _r0 = __msa_ld_h(r0, 0);
                    __msa_st_h(_r0, tmpptr, 0);
                    r0 += bottom_blob_tm.cstep * 8;
                    tmpptr += 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;

        // main loop: 4 output channels per iteration
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            int* output0_tm = top_blob_tm.channel(p);
            int* output1_tm = top_blob_tm.channel(p + 1);
            int* output2_tm = top_blob_tm.channel(p + 2);
            int* output3_tm = top_blob_tm.channel(p + 3);

            const Mat kernel0_tm = kernel_tm.channel(p / 4);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                // two tiles at a time
                for (; i + 1 < tiles; i += 2)
                {
                    const short* r0 = bb2.row<const short>(i / 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    int nn = inch; // inch always > 0

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);
                    v4i32 _sum2 = __msa_fill_w(0);
                    v4i32 _sum3 = __msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 64);
                        __builtin_prefetch(k0 + 128);
                        // widen the 4x8 packed kernel shorts to int32 lanes
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
                        v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
                        v8i16 _w3 = __msa_ld_h(k0 + 24, 0);
                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
                        v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
                        v8i16 _extw3 = __msa_clti_s_h(_w3, 0);
                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
                        v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
                        v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
                        v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
                        v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
                        v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);
                        v4i32 _val0_0 = __msa_fill_w(r0[0]);
                        v4i32 _val0_1 = __msa_fill_w(r0[1]);
                        v4i32 _val0_2 = __msa_fill_w(r0[2]);
                        v4i32 _val0_3 = __msa_fill_w(r0[3]);
                        v4i32 _val0_4 = __msa_fill_w(r0[4]);
                        v4i32 _val0_5 = __msa_fill_w(r0[5]);
                        v4i32 _val0_6 = __msa_fill_w(r0[6]);
                        v4i32 _val0_7 = __msa_fill_w(r0[7]);
                        v4i32 _val1_0 = __msa_fill_w(r0[8]);
                        v4i32 _val1_1 = __msa_fill_w(r0[9]);
                        v4i32 _val1_2 = __msa_fill_w(r0[10]);
                        v4i32 _val1_3 = __msa_fill_w(r0[11]);
                        v4i32 _val1_4 = __msa_fill_w(r0[12]);
                        v4i32 _val1_5 = __msa_fill_w(r0[13]);
                        v4i32 _val1_6 = __msa_fill_w(r0[14]);
                        v4i32 _val1_7 = __msa_fill_w(r0[15]);
                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0_0);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val0_1);
                        _sum2 = __msa_maddv_w(_sum2, _w0l, _val1_0);
                        _sum3 = __msa_maddv_w(_sum3, _w0h, _val1_1);
                        _sum0 = __msa_maddv_w(_sum0, _w1l, _val0_2);
                        _sum1 = __msa_maddv_w(_sum1, _w1h, _val0_3);
                        _sum2 = __msa_maddv_w(_sum2, _w1l, _val1_2);
                        _sum3 = __msa_maddv_w(_sum3, _w1h, _val1_3);
                        _sum0 = __msa_maddv_w(_sum0, _w2l, _val0_4);
                        _sum1 = __msa_maddv_w(_sum1, _w2h, _val0_5);
                        _sum2 = __msa_maddv_w(_sum2, _w2l, _val1_4);
                        _sum3 = __msa_maddv_w(_sum3, _w2h, _val1_5);
                        _sum0 = __msa_maddv_w(_sum0, _w3l, _val0_6);
                        _sum1 = __msa_maddv_w(_sum1, _w3h, _val0_7);
                        _sum2 = __msa_maddv_w(_sum2, _w3l, _val1_6);
                        _sum3 = __msa_maddv_w(_sum3, _w3h, _val1_7);
                        r0 += 16;
                        k0 += 32;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);
                    _sum2 = __msa_addv_w(_sum2, _sum3);

                    int sum[8];
                    __msa_st_w(_sum0, sum, 0);
                    __msa_st_w(_sum2, sum + 4, 0);

                    output0_tm[0] = sum[0];
                    output1_tm[0] = sum[1];
                    output2_tm[0] = sum[2];
                    output3_tm[0] = sum[3];
                    output0_tm[1] = sum[4];
                    output1_tm[1] = sum[5];
                    output2_tm[1] = sum[6];
                    output3_tm[1] = sum[7];
                    output0_tm += 2;
                    output1_tm += 2;
                    output2_tm += 2;
                    output3_tm += 2;
                }
                // remaining single tile
                for (; i < tiles; i++)
                {
                    const short* r0 = bb2.row<const short>(i / 2 + i % 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    int nn = inch; // inch always > 0

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);

                    for (int j = 0; j < nn; j++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 128);
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _w1 = __msa_ld_h(k0 + 8, 0);
                        v8i16 _w2 = __msa_ld_h(k0 + 16, 0);
                        v8i16 _w3 = __msa_ld_h(k0 + 24, 0);
                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v8i16 _extw1 = __msa_clti_s_h(_w1, 0);
                        v8i16 _extw2 = __msa_clti_s_h(_w2, 0);
                        v8i16 _extw3 = __msa_clti_s_h(_w3, 0);
                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        v4i32 _w1l = (v4i32)__msa_ilvr_h(_extw1, _w1);
                        v4i32 _w1h = (v4i32)__msa_ilvl_h(_extw1, _w1);
                        v4i32 _w2l = (v4i32)__msa_ilvr_h(_extw2, _w2);
                        v4i32 _w2h = (v4i32)__msa_ilvl_h(_extw2, _w2);
                        v4i32 _w3l = (v4i32)__msa_ilvr_h(_extw3, _w3);
                        v4i32 _w3h = (v4i32)__msa_ilvl_h(_extw3, _w3);
                        v4i32 _val0 = __msa_fill_w(r0[0]);
                        v4i32 _val1 = __msa_fill_w(r0[1]);
                        v4i32 _val2 = __msa_fill_w(r0[2]);
                        v4i32 _val3 = __msa_fill_w(r0[3]);
                        v4i32 _val4 = __msa_fill_w(r0[4]);
                        v4i32 _val5 = __msa_fill_w(r0[5]);
                        v4i32 _val6 = __msa_fill_w(r0[6]);
                        v4i32 _val7 = __msa_fill_w(r0[7]);
                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val1);
                        _sum0 = __msa_maddv_w(_sum0, _w1l, _val2);
                        _sum1 = __msa_maddv_w(_sum1, _w1h, _val3);
                        _sum0 = __msa_maddv_w(_sum0, _w2l, _val4);
                        _sum1 = __msa_maddv_w(_sum1, _w2h, _val5);
                        _sum0 = __msa_maddv_w(_sum0, _w3l, _val6);
                        _sum1 = __msa_maddv_w(_sum1, _w3h, _val7);
                        r0 += 8;
                        k0 += 32;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);

                    int sum[4];
                    __msa_st_w(_sum0, sum, 0);

                    output0_tm[0] = sum[0];
                    output1_tm[0] = sum[1];
                    output2_tm[0] = sum[2];
                    output3_tm[0] = sum[3];
                    output0_tm += 1;
                    output1_tm += 1;
                    output2_tm += 1;
                    output3_tm += 1;
                }
            }
        }

        remain_outch_start += nn_outch << 2;

        // tail loop: one output channel at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            int* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 1 < tiles; i += 2)
                {
                    const short* r0 = bb2.row<const short>(i / 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);
                    v4i32 _sum2 = __msa_fill_w(0);
                    v4i32 _sum3 = __msa_fill_w(0);

                    for (int q = 0; q < inch; q++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 64);
                        v8i16 _val0 = __msa_ld_h(r0, 0);
                        v8i16 _val1 = __msa_ld_h(r0 + 8, 0);
                        v8i16 _extval0 = __msa_clti_s_h(_val0, 0);
                        v8i16 _extval1 = __msa_clti_s_h(_val1, 0);
                        v4i32 _val0l = (v4i32)__msa_ilvr_h(_extval0, _val0);
                        v4i32 _val0h = (v4i32)__msa_ilvl_h(_extval0, _val0);
                        v4i32 _val1l = (v4i32)__msa_ilvr_h(_extval1, _val1);
                        v4i32 _val1h = (v4i32)__msa_ilvl_h(_extval1, _val1);
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        _sum0 = __msa_maddv_w(_sum0, _w0l, _val0l);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _val0h);
                        _sum2 = __msa_maddv_w(_sum2, _w0l, _val1l);
                        _sum3 = __msa_maddv_w(_sum3, _w0h, _val1h);
                        k0 += 8;
                        r0 += 16;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);
                    _sum2 = __msa_addv_w(_sum2, _sum3);

                    output0_tm[0] = __msa_reduce_add_w(_sum0);
                    output0_tm[1] = __msa_reduce_add_w(_sum2);
                    output0_tm += 2;
                }
                for (; i < tiles; i++)
                {
                    const short* r0 = bb2.row<const short>(i / 2 + i % 2);
                    const short* k0 = kernel0_tm.row<const short>(r);

                    v4i32 _sum0 = __msa_fill_w(0);
                    v4i32 _sum1 = __msa_fill_w(0);

                    for (int q = 0; q < inch; q++)
                    {
                        __builtin_prefetch(r0 + 32);
                        __builtin_prefetch(k0 + 32);
                        v8i16 _val = __msa_ld_h(r0, 0);
                        v8i16 _extval = __msa_clti_s_h(_val, 0);
                        v4i32 _vall = (v4i32)__msa_ilvr_h(_extval, _val);
                        v4i32 _valh = (v4i32)__msa_ilvl_h(_extval, _val);
                        v8i16 _w0 = __msa_ld_h(k0, 0);
                        v8i16 _extw0 = __msa_clti_s_h(_w0, 0);
                        v4i32 _w0l = (v4i32)__msa_ilvr_h(_extw0, _w0);
                        v4i32 _w0h = (v4i32)__msa_ilvl_h(_extw0, _w0);
                        _sum0 = __msa_maddv_w(_sum0, _w0l, _vall);
                        _sum1 = __msa_maddv_w(_sum1, _w0h, _valh);
                        k0 += 8;
                        r0 += 8;
                    }

                    _sum0 = __msa_addv_w(_sum0, _sum1);

                    output0_tm[0] = __msa_reduce_add_w(_sum0);
                    output0_tm++;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            int tmp[4][6];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);

                    const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
                    const int* output0_tm_1 = output0_tm_0 + tiles * 1;
                    const int* output0_tm_2 = output0_tm_0 + tiles * 2;
                    const int* output0_tm_3 = output0_tm_0 + tiles * 3;
                    const int* output0_tm_4 = output0_tm_0 + tiles * 4;
                    const int* output0_tm_5 = output0_tm_0 + tiles * 5;

                    int* output0 = out0.row<int>(i * 4) + j * 4;

                    // 0 = r00 + (r01 + r02) + (r03 + r04)
                    // 1 = (r01 - r02) + (r03 - r04) * 2
                    // 2 = (r01 + r02) + (r03 + r04) * 4
                    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

                    // TODO msa optimize
                    for (int m = 0; m < 5; m++)
                    {
                        int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                        int tmp13a = output0_tm_1[0] - output0_tm_2[0];

                        int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                        int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                        tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
                        tmp[1][m] = tmp13a + tmp13b * 2;
                        tmp[2][m] = tmp02a + tmp02b * 4;
                        tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;

                        output0_tm_0 += tiles * 6;
                        output0_tm_1 += tiles * 6;
                        output0_tm_2 += tiles * 6;
                        output0_tm_3 += tiles * 6;
                        output0_tm_4 += tiles * 6;
                        output0_tm_5 += tiles * 6;
                    }
                    // last column carries an extra scale of 4
                    for (int m = 5; m < 6; m++)
                    {
                        int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                        int tmp13a = output0_tm_1[0] - output0_tm_2[0];

                        int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                        int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                        tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
                        tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
                        tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
                        tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;

                        output0_tm_0 += tiles * 6;
                        output0_tm_1 += tiles * 6;
                        output0_tm_2 += tiles * 6;
                        output0_tm_3 += tiles * 6;
                        output0_tm_4 += tiles * 6;
                        output0_tm_5 += tiles * 6;
                    }

                    // final horizontal pass; divide by 576 = (G-scale 24)^2
                    for (int m = 0; m < 4; m++)
                    {
                        const int* tmp0 = tmp[m];

                        int tmp02a = tmp0[1] + tmp0[2];
                        int tmp13a = tmp0[1] - tmp0[2];

                        int tmp02b = tmp0[3] + tmp0[4];
                        int tmp13b = tmp0[3] - tmp0[4];

                        output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
                        output0[1] = (tmp13a + tmp13b * 2) / 576;
                        output0[2] = (tmp02a + tmp02b * 4) / 576;
                        output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;

                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
pIMH.h | #ifndef pIMH_hpp
#define pIMH_hpp
#include "Distributions.h"
#include<mkl.h>
#include<omp.h>
#include<stdio.h>
#include<random>
#include<numeric>
#include<vector>
#include<array>
#include<utility>
using namespace std;
namespace Markov
{
/**
*@author: Emily Jakobs
*@brief: a class to run the perfect independent Metropolis Hastings algorithm
developed by Jem Corcoran and R.L. Tweedie. Original paper at
https://projecteuclid.org/euclid.aoap/1015345299#abstract
*/
template<typename CandidateDist, typename TargetDist>
class IMH
{
protected:
    /*
    1.11 is the default so that we can detect when the bound hasn't been
    initialized. Best practice would normally be std::optional<double>
    when compiling with C++17 or later, or boost::optional<double> when
    C++17 is unavailable (or when parts of the program are too old to
    work with the newer std types). For this class, though, we use a
    preset sentinel value. */
double lower_bound{ 1.11 };
CandidateDist Q;
TargetDist pi;
public:
constexpr IMH(const CandidateDist& _Q, const TargetDist& _pi,double lb) : Q(_Q), pi(_pi), lower_bound(lb) {}
constexpr auto MH_ratio(const double x, const double y) const noexcept
{
//std::cout << pi.pdf(y) << " " << Q.pdf(x) << "\n";
return (pi.pdf(y)*Q.pdf(x))/(pi.pdf(x)*Q.pdf(y));
}
/**
*@author: Emily Jakobs
*@algorithm by Corcoran and Tweedie
*@return: "larger" of x and y according to the partial order for perfect IMH
*/
constexpr auto partial_order(const double x, const double y) const noexcept
{
return MH_ratio( x, y) >= 1 ? x : y;
}
/**
*@author: Zane Jakobs
* @brief: alpha(x,y) in the paper
*@return: min(1, MH_ratio)
*/
constexpr auto accceptance_threshold(const double x, const double y) const noexcept
{
auto ratio = MH_ratio(x, y);
return ratio < 1.0 ? ratio : static_cast<double>(1.0);
}
/**
*@author: Emily Jakobs
*@brief: finds \ell from the paper, lower bound on reordered sample space
*/
//constexpr auto find_lower_bound(const TargetDist& _pi) const noexcept;
/**
*@author: Emily Jakobs
*@brief: runs the classical Metropolis Hastings algorithm with
* a symmetric transition kernel from time t = -n to 0 with pre-chosen
* samples from the uniform and from the candidate
*/
constexpr auto MH_from_past(int& n, const std::vector<double>& qvec, const std::vector<double>& avec) const noexcept
{
auto vlen = qvec.size() - 1;
auto state = lower_bound;
//std::cout << "MHFP\n";
for(int t = 0; t <= n; t++){
//compiler will optimize this to not declare a new one each loop
auto threshold = accceptance_threshold(state, qvec[vlen - n +t] );
if(avec[vlen - n + t] < threshold){
state = qvec[vlen - n + t];
}
}//end for
return state;
}
/**
*@author: Emily Jakobs
*@brief: runs the perfect IMH algorithm once
*/
auto perfect_IMH_sample(unsigned initial_len, pair<default_random_engine, uniform_real_distribution<double> >& spar) const noexcept
{
auto avec = Markov::uniform_sample_vector(spar, initial_len);
auto qvec = Q.create_sample_vector(initial_len);
bool accepted_first = false;
int n = 1;
// #pragma omp parallel
while(!accepted_first){
//vlen is "time 0"
auto vlen = avec.size() - 1;
//update vectors if we hit the end of them
if(n == vlen){
avec = Markov::update_uniform_sample_vector(avec, spar, initial_len);
Q.update_sample_vector(qvec, initial_len);
vlen += initial_len;
}/*
std::cout << "Large n, printing qvec[vlen-n] \n";
std::cout << qvec[vlen-n] << "\n";
std::cout << "Printing acceptance_threshold\n";
std::cout << accceptance_threshold(lower_bound, qvec[vlen - n]);*/
auto threshold = accceptance_threshold(lower_bound, qvec[vlen - n]);
//if the first transition from time -n is accepted, we have converged
if(avec[vlen - n] < threshold ){
accepted_first = true;
//std::cout << n <<"\n";
} else{
n++;//if we reject the transition, move back one in time
}
}//end while
auto sample = MH_from_past(n, qvec, avec);
return sample;
}
auto perfect_IMH_sample_vector(unsigned samples, unsigned initial_len = 100) const noexcept
{
auto sampler = std_sampler_pair();
vector<double> sampleContainer(samples);
// #pragma omp parallel num_threads(4)
//{
// #pragma omp for
for(int i = 0; i < samples; i++){
sampleContainer[i] = perfect_IMH_sample(initial_len, sampler);
}
// }
return sampleContainer;
}
};
}//end namespace scope
#endif /* MetropolisHastings_hpp */
|
omp_taskyield2.c | <ompts:test>
<ompts:testdescription>Test taskyield directive. First generate a set of tasks and pause it immediately. Then we resume half of them and check whether they are scheduled by different threads</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp taskyield</ompts:directive>
<ompts:dependences>omp taskwait</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#include "timer.h"
/* Test of the taskyield directive: a parent task spawns a "small" task that
   spin-waits on 'go' and a "big" task that sets 'go' and then sleeps for a
   long time.  With taskyield the parent can be resumed by another thread
   while the big task still runs; the test passes when the parent task
   finishes after the big child (main_task_finish_time > long_child_finish_time). */
int <ompts:testcode:functionname>omp_taskyield</ompts:testcode:functionname>(FILE * logFile){
  <ompts:orphan:vars>
  /* NOTE(review): 'go' is a plain int polled in a tight loop without an
     OpenMP flush/atomic; this relies on runtime scheduling — confirm intent. */
  int go=0;
  double main_task_finish_time=0;
  double long_child_finish_time=0;
  </ompts:orphan:vars>
  #pragma omp parallel num_threads(2)
  {
    #pragma omp single nowait
    { <ompts:orphan>
      /* parent task */
      #pragma omp task
      { fprintf(logFile," %lf create parent task by thread %d\n",timer(),omp_get_thread_num());
        /* small task: waits until the big task releases it, then sleeps briefly */
        #pragma omp task shared(go)
        {
          fprintf(logFile," %lf create small task by thread %d\n",timer(),omp_get_thread_num());
          while (go<1)
          {
          }/* end of while: spin until the big task sets go=1 */
          my_sleep (SLEEPTIME);
          fprintf(logFile," %lf finish small task by thread %d\n",timer(),omp_get_thread_num());
        }
        /* big task: releases the small task, then sleeps for a long time */
        #pragma omp task shared(go)
        {
          fprintf(logFile," %lf create big task by thread %d\n",timer(),omp_get_thread_num());
          go=1;
          my_sleep(SLEEPTIME_LONG);
          long_child_finish_time=timer();
          fprintf(logFile," %lf finish big task by thread %d\n",timer(),omp_get_thread_num());
        }
        /* the directive under test: allow the thread to switch tasks here */
        <ompts:check>#pragma omp taskyield</ompts:check>
        my_sleep (SLEEPTIME);
        main_task_finish_time=timer();
        fprintf(logFile," %lf finish parent task by thread %d\n",timer(),omp_get_thread_num());
      }/* end of omp main task */
      </ompts:orphan>
    } /* end of single */
  } /* end of parallel */
  /* non-zero (success) iff the parent finished after the big child */
  return (main_task_finish_time>long_child_finish_time);
}
</ompts:testcode>
</ompts:test>
|
plot.h | #ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H
#include <unordered_map>
#include <sstream>
#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"
#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
#include "openmc/random_lcg.h"
namespace openmc {
//===============================================================================
// Global variables
//===============================================================================
class Plot;
namespace model {
extern std::vector<Plot> plots; //!< Plot instance container
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter
extern int plotter_stream; // Stream index used by the plotter
} // namespace model
//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================
//! Holds an 8-bit-per-channel RGB color for plotted objects.
struct RGBColor {
  // Constructors
  RGBColor() : red(0), green(0), blue(0) { };
  RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { };
  RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { };
  //! Construct from a vector of exactly 3 components (r, g, b).
  //! \throws std::out_of_range if the vector size is not 3
  RGBColor(const std::vector<int> &v) {
    if (v.size() != 3) {
      throw std::out_of_range("Incorrect vector size for RGBColor.");
    }
    red = v[0];
    green = v[1];
    blue = v[2];
  }
  //! Component-wise equality.  const-qualified so comparisons work when the
  //! left-hand side is const (e.g. the file-scope WHITE/RED constants).
  bool operator ==(const RGBColor& other) const {
    return red == other.red && green == other.green && blue == other.blue;
  }
  // Members
  uint8_t red, green, blue;
};
// some default colors
const RGBColor WHITE {255, 255, 255};
const RGBColor RED {255, 0, 0};
typedef xt::xtensor<RGBColor, 2> ImageData;
struct IdData {
// Constructor
IdData(size_t h_res, size_t v_res);
// Methods
void set_value(size_t y, size_t x, const Particle& p, int level);
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};
struct PropertyData {
// Constructor
PropertyData(size_t h_res, size_t v_res);
// Methods
void set_value(size_t y, size_t x, const Particle& p, int level);
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};
enum class PlotType {
slice = 1,
voxel = 2
};
enum class PlotBasis {
xy = 1,
xz = 2,
yz = 3
};
enum class PlotColorBy {
cells = 0,
mats = 1
};
//===============================================================================
// Plot class
//===============================================================================
class PlotBase {
public:
template<class T> T get_map() const;
// Members
public:
Position origin_; //!< Plot origin in geometry
Position width_; //!< Plot width in geometry
PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
std::array<size_t, 3> pixels_; //!< Plot size in pixels
bool color_overlaps_; //!< Show overlapping cells?
int level_; //!< Plot universe level
};
// Trace one ray per pixel across a 2D slice of the geometry and record the
// result in a T (IdData for cell/material ids, PropertyData for
// temperature/density).  Rows are processed in parallel with OpenMP.
template<class T>
T PlotBase::get_map() const {
  size_t width = pixels_[0];   // horizontal resolution
  size_t height = pixels_[1];  // vertical resolution
  // get pixel size in geometry units
  double in_pixel = (width_[0])/static_cast<double>(width);
  double out_pixel = (width_[1])/static_cast<double>(height);
  // size data array
  T data(width, height);
  // setup basis indices and initial position centered on pixel;
  // in_i/out_i select which Position components vary horizontally/vertically
  int in_i, out_i;
  Position xyz = origin_;
  switch(basis_) {
    case PlotBasis::xy :
      in_i = 0;
      out_i = 1;
      break;
    case PlotBasis::xz :
      in_i = 0;
      out_i = 2;
      break;
    case PlotBasis::yz :
      in_i = 1;
      out_i = 2;
      break;
    default:
      UNREACHABLE();
  }
  // set initial position to the center of the top-left pixel
  // (minimum in-plane coordinate, maximum out-of-plane coordinate)
  xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
  xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;
  // arbitrary direction for the tracking particle
  Direction dir = {0.7071, 0.7071, 0.0};
  #pragma omp parallel
  {
    // each thread owns its own particle to avoid sharing mutable state
    Particle p;
    p.r() = xyz;
    p.u() = dir;
    p.coord_[0].universe = model::root_universe;
    int level = level_;
    int j{};
    #pragma omp for
    for (int y = 0; y < height; y++) {
      p.r()[out_i] = xyz[out_i] - out_pixel * y;  // step down one row
      for (int x = 0; x < width; x++) {
        p.r()[in_i] = xyz[in_i] + in_pixel * x;   // step right one column
        p.n_coord_ = 1;
        // local variables
        bool found_cell = find_cell(p, 0);
        // j selects the universe level recorded: deepest found level by
        // default, or the user-requested plot level when level_ >= 0
        j = p.n_coord_ - 1;
        if (level >=0) {j = level + 1;}
        if (found_cell) {
          data.set_value(y, x, p, j);
        }
        // NOTE(review): presumably marks pixels where cells overlap when
        // overlap coloring is enabled — confirm check_cell_overlap semantics
        if (color_overlaps_ && check_cell_overlap(p, false)) {
          data.set_overlap(y, x);
        }
      } // inner for
    } // outer for
  } // omp parallel
  return data;
}
//! A slice or voxel plot specification parsed from a <plot> node of plots.xml.
class Plot : public PlotBase {
public:
  // Constructor: parses all settings from the XML node via the set_* helpers
  Plot(pugi::xml_node plot);
  // Methods — each parses one aspect of the <plot> XML node
private:
  void set_id(pugi::xml_node plot_node);             //!< plot id
  void set_type(pugi::xml_node plot_node);           //!< slice vs voxel
  void set_output_path(pugi::xml_node plot_node);    //!< output filename
  void set_bg_color(pugi::xml_node plot_node);       //!< background color
  void set_basis(pugi::xml_node plot_node);          //!< XY/XZ/YZ basis
  void set_origin(pugi::xml_node plot_node);         //!< plot origin
  void set_width(pugi::xml_node plot_node);          //!< plot width
  void set_universe(pugi::xml_node plot_node);       //!< universe level
  void set_default_colors(pugi::xml_node plot_node); //!< randomized colors
  void set_user_colors(pugi::xml_node plot_node);    //!< user-given colors
  void set_meshlines(pugi::xml_node plot_node);      //!< meshline overlay
  void set_mask(pugi::xml_node plot_node);           //!< masked regions
  void set_overlap_color(pugi::xml_node plot_node);  //!< overlap highlight
  // Members
public:
  int id_; //!< Plot ID
  PlotType type_; //!< Plot type (Slice/Voxel)
  PlotColorBy color_by_; //!< Plot coloring (cell/material)
  int meshlines_width_; //!< Width of lines added to the plot
  int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
  RGBColor meshlines_color_; //!< Color of meshlines on the plot
  RGBColor not_found_ {WHITE}; //!< Plot background color
  RGBColor overlap_color_ {RED}; //!< Plot overlap color
  std::vector<RGBColor> colors_; //!< Plot colors
  std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================
//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);
//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);
//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxesl data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
hid_t* dset, hid_t* memspace);
//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxesl data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset,
hid_t memspace, void* buf);
//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);
//===============================================================================
// External functions
//===============================================================================
//! Read plot specifications from a plots.xml file
void read_plots_xml();
//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);
//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);
//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();
} // namespace openmc
#endif // OPENMC_PLOT_H
|
GB_unaryop__identity_uint8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_int32
// op(A') function: GB_tran__identity_uint8_int32
// C type: uint8_t
// A type: int32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entry-wise: Cx [p] = (uint8_t) Ax [p].
// (Auto-generated file; code left untouched, comments only.)
GrB_Info GB_unop__identity_uint8_int32
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so a static schedule is evenly balanced
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply the identity operator: C = op (cast (A')).
// The loop body is generated by the included template GB_unaryop_transpose.c,
// which uses the GB_* macros defined above.
GrB_Info GB_tran__identity_uint8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,        // per-slice row counts
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,     // partition of A for the slices
    int naslice                             // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__identity_int8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_uint16
// op(A') function: GB_tran__identity_int8_uint16
// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entry-wise: Cx [p] = (int8_t) Ax [p].
// (Auto-generated file; code left untouched, comments only.)
GrB_Info GB_unop__identity_int8_uint16
(
    int8_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent, so a static schedule is evenly balanced
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose, typecast, and apply the identity operator: C = op (cast (A')).
// The loop body is generated by the included template GB_unaryop_transpose.c,
// which uses the GB_* macros defined above.
GrB_Info GB_tran__identity_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,                // per-slice row counts
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,    // partition of A for the slices
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
direct_method.c | #include "direct_method.h"
#include "IO.h"
#include "mpi.h"
#include "omp.h"
/* Here are the initialization of the global variables: */
bodies_t bodies;
char *Direct_data_file;
bool Direct_are_data_bzipped2 = FALSE;
position_t center;
COORDINATES_T half_side;
extern int nb_proc;
extern int my_rank;
FMB_Info_t FMB_Info;
/* Buffer des Pj et Fj pour les calculs distants */
COORDINATES_T *pj_pos_x;
COORDINATES_T *pj_pos_y;
COORDINATES_T *pj_pos_z;
COORDINATES_T *pj_fx;
COORDINATES_T *pj_fy;
COORDINATES_T *pj_fz;
/* L'ensemble des masses */
VALUES_T *p_allvalues;
/* pointeur vers les masses courantes */
VALUES_T *p_values;
/* See definition in 'FMB.c'. */
void bunzip2_file(const char *filename);
void bzip2_file(const char *filename);
#define SWAP(x1, y1, z1, v1, x2, y2, z2, v2, tmp) tmp=x1; x1=x2; x2=tmp; tmp=y1; y1=y2; y2=tmp; tmp=z1; z1=z2; z2=tmp;
/*********************************************************************************************
**********************************************************************************************
Direct_method_Init
**********************************************************************************************
*********************************************************************************************/
void Direct_method_Init(){
/* Checking: */
if (f_output == NULL){
FMB_error("'f_output' must be set.\n");
}
/************************************ eps_soft_square: **********************************************/
fprintf(f_output, "Softening parameter: %.1e\n", FMB_Info.eps_soft);
FMB_Info.eps_soft_square = FMB_Info.eps_soft * FMB_Info.eps_soft;
/* Clear 'center' and 'half_side': */
position_Initialize(¢er);
half_side = (COORDINATES_T) 0.0;
}
/*********************************************************************************************
**********************************************************************************************
Direct_method_Data
**********************************************************************************************
*********************************************************************************************/
/* Allocate the six Pj exchange buffers (remote positions and the forces
   accumulated against them), one element per body, and zero them all. */
void Direct_method_Pj_Initialize(bodies_ind_t nb_bodies)
{
  bodies_ind_t i;
  pj_pos_x = FMB_malloc_with_check(nb_bodies * sizeof(COORDINATES_T));
  pj_pos_y = FMB_malloc_with_check(nb_bodies * sizeof(COORDINATES_T));
  pj_pos_z = FMB_malloc_with_check(nb_bodies * sizeof(COORDINATES_T));
  pj_fx = FMB_malloc_with_check(nb_bodies * sizeof(COORDINATES_T));
  pj_fy = FMB_malloc_with_check(nb_bodies * sizeof(COORDINATES_T));
  pj_fz = FMB_malloc_with_check(nb_bodies * sizeof(COORDINATES_T));
  /* Zero every position and force slot. */
  for (i = 0; i < nb_bodies; ++i) {
    pj_pos_x[i] = pj_pos_y[i] = pj_pos_z[i] = 0;
    pj_fx[i] = pj_fy[i] = pj_fz[i] = 0;
  }
}
/* Release the six Pj exchange buffers allocated by
   Direct_method_Pj_Initialize() (reverse allocation order). */
void Direct_method_Pj_Terminate()
{
  FMB_free(pj_fz);
  FMB_free(pj_fy);
  FMB_free(pj_fx);
  FMB_free(pj_pos_z);
  FMB_free(pj_pos_y);
  FMB_free(pj_pos_x);
}
/* Send one block of data from process 0's send buffers to process p:
   positions from the pj_pos_* buffers (tags 1-3) and speed vectors from
   'spd' (tag 5).  Four non-blocking sends are posted in req[0..3]; the
   caller must complete them before reusing the buffers. */
void Direct_method_Data_Send(int p, position_t *spd, MPI_Request *req)
{
  MPI_Isend(pj_pos_x, bodies.nb_bodies, MY_MPI_F, p, 1, MPI_COMM_WORLD, req);
  MPI_Isend(pj_pos_y, bodies.nb_bodies, MY_MPI_F, p, 2, MPI_COMM_WORLD, req+1);
  MPI_Isend(pj_pos_z, bodies.nb_bodies, MY_MPI_F, p, 3, MPI_COMM_WORLD, req+2);
  /* Speeds are sent as 3*N scalars; assumes position_t is three contiguous
     MY_MPI_F values — TODO confirm. */
  MPI_Isend(spd, bodies.nb_bodies * 3, MY_MPI_F, p, 5, MPI_COMM_WORLD, req+3);
}
/* Initialisation des corps et reception des données */
/* utilisé par les processus autres que 0 */
void Direct_method_Data_InitRecv()
{
bodies_ind_t nb_bodies;
bodies_ind_t nb_bodies_total;
MPI_Request req[4];
/* Recuperer le nombre de corps local */
MPI_Bcast(&nb_bodies, 1, MPI_LONG, 0, MPI_COMM_WORLD);
/* retrouver le nombre de corps total et allouer les masses */
nb_bodies_total = nb_bodies * nb_proc;
p_allvalues = FMB_malloc_with_check(nb_bodies_total * sizeof(VALUES_T));
/* Allocation et initialisation des corps */
bodies_Initialize(&bodies, nb_bodies);
bodies.nb_bodies = nb_bodies;
bodies.size_allocated = nb_bodies;
bodies.p_values = p_allvalues + nb_bodies * (my_rank - 1);
/* On place le pointeur des masses */
p_values = bodies.p_values + nb_bodies;
/* Reception des corps (position et vitesses) */
MPI_Irecv(bodies.p_pos_x, nb_bodies, MY_MPI_F, 0, 1, MPI_COMM_WORLD, req);
MPI_Irecv(bodies.p_pos_y, nb_bodies, MY_MPI_F, 0, 2, MPI_COMM_WORLD, req+1);
MPI_Irecv(bodies.p_pos_z, nb_bodies, MY_MPI_F, 0, 3, MPI_COMM_WORLD, req+2);
MPI_Irecv(bodies.p_speed_vectors, nb_bodies * 3, MY_MPI_F, 0, 5, MPI_COMM_WORLD, req+3); /* Utiliser un type MPI_STRUCT si machines non homogènes */
bodies_ClearFP(&bodies);
/* Recuperation des masses */
MPI_Bcast(p_allvalues, nb_bodies_total, MY_MPI_F, 0, MPI_COMM_WORLD);
/* Initialisation des buffers Pj */
Direct_method_Pj_Initialize(nb_bodies);
/* On termine la reception */
MPI_Wait(req, NULL);
MPI_Wait(req+1, NULL);
MPI_Wait(req+2, NULL);
MPI_Wait(req+3, NULL);
}
/* Lecture et envoie des données */
/* Appelé uniquement par le processus 0 */
void Direct_method_Data(char *data_file){
bodies_ind_t k;
bodies_ind_t nb_bodies;
int i;
VALUES_T *current_values;
void *swap;
position_t *spd;
MPI_Request id[4];
if (INFO_DISPLAY(2)){
fprintf(f_output, "Opening data file \'%s\' for direct computation... \n", data_file);
}
/* Initialize Input operations: */
FMB_IO_InitI(data_file);
FMB_IO_Scan_header(&nb_bodies, ¢er, &half_side);
if (INFO_DISPLAY(1)){
fprintf(f_output, "Bodies number: ");
fprintf(f_output, FORMAT_BODIES_IND_T, nb_bodies);
fprintf(f_output, "\n");
fflush(f_output);
}
/* Allocation des masses */
p_allvalues = FMB_malloc_with_check(nb_bodies * sizeof(VALUES_T));
/* Positionnement du pointeur des masses courantes */
p_values = p_allvalues;
current_values = p_allvalues;
/* nombre local de corps */
nb_bodies /= nb_proc;
/* On envoie le nombre de corps */
MPI_Bcast(&nb_bodies, 1, MPI_LONG, 0, MPI_COMM_WORLD);
/* On initialise les corps et les buffers Pj et spd */
bodies_Initialize(&bodies, nb_bodies);
Direct_method_Pj_Initialize(nb_bodies);
spd = FMB_malloc_with_check(nb_bodies * sizeof(position_t));
/* Pour chaque processus */
for (i = 1; i <= nb_proc; ++i)
{
/* On remplit bodies des nb_bodies corps suivants */
bodies.nb_bodies = 0;
bodies.p_values = current_values; /* On place le pointeur de masse au bon endroit du buffer */
for (k=0; k<nb_bodies; ++k)
{
body_t body_tmp;
body_Initialize(&body_tmp);
if (FMB_IO_Scan_body(&body_tmp) != 1)
FMB_error("In Direct_method_Data(): FMB_IO_Scan_body() failed for body #%i\n", k);
/* if (k<100){ body_Display(&body_tmp, f_output); } */
bodies_Add(&bodies, &body_tmp);
}
/* On les envoie (en asynchrone) */
if (i != 1)
{
MPI_Wait(id,NULL); MPI_Wait(id+1,NULL);
MPI_Wait(id+2,NULL); MPI_Wait(id+3,NULL);
}
if (i != nb_proc)
{
SWAP(bodies.p_pos_x, bodies.p_pos_y, bodies.p_pos_z, bodies.p_speed_vectors,
pj_pos_x, pj_pos_y, pj_pos_z, spd, swap);
Direct_method_Data_Send(i, spd, id);
}
current_values += nb_bodies;
}
/* Du coup le processus 0 conserve les derniers corps. Il aurait été malin que ce soit le processus nb_proc - 1 qui fasse la lecture. */
/* Chaque processus doit récuperer l'ensemble des masses. */
MPI_Bcast(p_allvalues, nb_bodies * nb_proc, MY_MPI_F, 0, MPI_COMM_WORLD);
bodies_ClearFP(&bodies);
FMB_free(spd);
/* Terminate Input operations: */
FMB_IO_TerminateI();
}
/*********************************************************************************************
********************************************************************************************
**********************************************************************************************
Direct_method_Data_bodies
**********************************************************************************************
*********************************************************************************************/
/* Same as Direct_method_Data() but we use the position and values
 * of all bodies stored in 'p_b' (instead of the bodies stored
 * in the file "data_file" in Direct_method_Data()).
 * NOTE(review): unlike Direct_method_Data(), nothing is distributed over
 * MPI here — all bodies are copied into the global 'bodies'; presumably
 * only used on the sequential path — confirm. */
void Direct_method_Data_bodies(bodies_t *p_b){
  bodies_it_t it;
  bodies_Initialize(&bodies, bodies_Nb_bodies(p_b));
  /* Copy every body of 'p_b' into the global 'bodies'. */
  for (bodies_it_Initialize(&it, p_b);
       bodies_it_Is_valid(&it);
       bodies_it_Go2Next(&it)){
    body_t body_tmp;
    bodies_it_Get_body(&it, &body_tmp);
    bodies_Add(&bodies, &body_tmp);
  }
  bodies_ClearFP(&bodies);
}
/*********************************************************************************************
**********************************************************************************************
Direct_method_Compute
**********************************************************************************************
*********************************************************************************************/
/* First block of the computation: mutual interactions among the bodies
   owned by this process (forces and potential). */
void Direct_method_Compute_First(){
  /********************* Without reciprocity: *******************************************/
  /* bodies_Compute_own_interaction_no_mutual() is not implemented ... */
  /********************* With reciprocity: **********************************************/
  /* Compute the force and the potential: */
  bodies_Compute_own_interaction(&bodies);
  /**************** Possible scaling with CONSTANT_INTERACTION_FACTOR: ********************/
  /* We can also use CONSTANT_INTERACTION_FACTOR only for the total potential energy ... */
#ifdef _USE_CONSTANT_INTERACTION_FACTOR_
  bodies_Scale_with_CONSTANT_INTERACTION_FACTOR(&bodies);
#endif /* #ifdef _USE_CONSTANT_INTERACTION_FACTOR_ */
}
/* Computation for the intermediate blocks: interact the local bodies with
   the remote block currently held in the Pj buffers, then advance the
   current-mass pointer, wrapping at the end of the global mass array. */
void Direct_method_Compute_Mid()
{
  bodies_Compute_other_interaction(&bodies, pj_pos_x, pj_pos_y, pj_pos_z,
                                   pj_fx, pj_fy, pj_fz, p_values);
  p_values += bodies.nb_bodies;
  if (p_values == p_allvalues + nb_proc * bodies.nb_bodies) {
    p_values = p_allvalues;
  }
}
/* Computation pour le demi-bloc final */
void Direct_method_Compute_Last()
{
int rank = (my_rank == 0 ? nb_proc : my_rank);
bodies_Compute_other_half_interaction(&bodies, pj_pos_x, pj_pos_y, pj_pos_z, pj_fx, pj_fy, pj_fz, p_values, rank <= (nb_proc / 2) ? 0 : 1);
}
/*********************************************************************************************
**********************************************************************************************
************************* Move of the bodies: ************************************************
Direct_method_Move : Leapfrog integrator ( Kick Drift Kick )
**********************************************************************************************
*********************************************************************************************/
/* "Kick and Drift" half of the leapfrog (Kick-Drift-Kick) integrator:
   for each body, update the velocity (kick) then the position (drift). */
void KnD_Direct_method_Move(REAL_T dt ){
  /**** Kick N Drift ***/
  bodies_it_t it;
  for (bodies_it_Initialize(&it, &bodies);
       bodies_it_Is_valid(&it);
       bodies_it_Go2Next(&it)){
    bodies_Kick_Move(&it,dt);
    bodies_Drift_Move(&it,dt);
  }
  /* The loop below is a kept (disabled) OpenMP alternative that performs the
     same kick+drift update directly on the body arrays: */
  /*
  bodies_ind_t k;
  #pragma omp parallel for schedule(static)
  for (k = 0; k < bodies.nb_bodies; ++k)
  {
    bodies.p_speed_vectors[k].dat[0] += bodies.p_fx[k] * (1 / bodies.p_values[k]) * (dt / 2);
    bodies.p_speed_vectors[k].dat[1] += bodies.p_fy[k] * (1 / bodies.p_values[k]) * (dt / 2);
    bodies.p_speed_vectors[k].dat[2] += bodies.p_fz[k] * (1 / bodies.p_values[k]) * (dt / 2);
    bodies.p_pos_x[k] += bodies.p_speed_vectors[k].dat[0] * dt;
    bodies.p_pos_y[k] += bodies.p_speed_vectors[k].dat[1] * dt;
    bodies.p_pos_z[k] += bodies.p_speed_vectors[k].dat[2] * dt;
  }
  */
}
/* "Kick" half of the leapfrog integrator: update only the velocities of
   all bodies (no position update). */
void K_Direct_method_Move(REAL_T dt ){
  /************************* Move of the bodies: ******************************************/
  bodies_it_t it;
  for (bodies_it_Initialize(&it, &bodies);
       bodies_it_Is_valid(&it);
       bodies_it_Go2Next(&it)){
    bodies_Kick_Move(&it,dt);
  }
  /* Kept (disabled) OpenMP alternative operating directly on the arrays: */
  /*
  bodies_ind_t k;
  #pragma omp parallel for schedule(static)
  for (k = 0; k < bodies.nb_bodies; ++k)
  {
    // fprintf(stderr,"c=%d thread %d \n", k, omp_get_thread_num());
    bodies.p_speed_vectors[k].dat[0] += bodies.p_fx[k] * (1 / bodies.p_values[k]) * (dt / 2);
    bodies.p_speed_vectors[k].dat[1] += bodies.p_fy[k] * (1 / bodies.p_values[k]) * (dt / 2);
    bodies.p_speed_vectors[k].dat[2] += bodies.p_fz[k] * (1 / bodies.p_values[k]) * (dt / 2);
  }
  */
}
/*********************************************************************************************
**********************************************************************************************
Direct_method_Terminate
**********************************************************************************************
*********************************************************************************************/
/* Release all resources of the direct method: the bodies, the global mass
   array, and the data-file name (recompressing the file first if it was
   originally bzip2-compressed). */
void Direct_method_Terminate(){
  bodies_Free(&bodies);
  FMB_free(p_allvalues);
  if (Direct_are_data_bzipped2){
    /* We recompress the data file: */
    bzip2_file(Direct_data_file);
  }
  FMB_free(Direct_data_file);
}
/* Variant of Direct_method_Terminate() that does NOT free the global mass
   array 'p_allvalues'. */
void Direct_method_Terminate2(){
  bodies_Free(&bodies);
  if (Direct_are_data_bzipped2){
    /* We recompress the data file: */
    bzip2_file(Direct_data_file);
  }
  FMB_free(Direct_data_file);
}
/*********************************************************************************************
**********************************************************************************************
sum
**********************************************************************************************
*********************************************************************************************/
/* Sum the force components over all local bodies, reduce the sums across
   all MPI processes, and print the global total on rank 0.
   NOTE(review): results_file, step_number_value and total_potential_energy
   are currently unused — output always goes to f_output; confirm intent. */
void Direct_method_Sum(char *results_file,
                       unsigned long step_number_value,
                       bodies_t *p_bodies,
                       VALUES_T total_potential_energy){
  FILE *f_results;
  position_t force_sum;        /* local per-process force sum */
  position_t force_sum_total;  /* global sum after the reduction */
  bodies_it_t it;
  f_results = f_output;
  position_Initialize(&force_sum);
  position_Initialize(&force_sum_total);
  /* Accumulate the force components of every local body. */
  for (bodies_it_Initialize(&it, p_bodies);
       bodies_it_Is_valid(&it);
       bodies_it_Go2Next(&it)){
    position_Set_x(&force_sum, position_Get_x(&force_sum) + bodies_it_Get_fx(&it));
    position_Set_y(&force_sum, position_Get_y(&force_sum) + bodies_it_Get_fy(&it));
    position_Set_z(&force_sum, position_Get_z(&force_sum) + bodies_it_Get_fz(&it));
  }
  /* Reduce the three components at once; assumes position_t is three
     contiguous MY_MPI_F values — TODO confirm. */
  MPI_Reduce(&force_sum, &force_sum_total, 3, MY_MPI_F, MPI_SUM, 0, MPI_COMM_WORLD);
  if (my_rank == 0)
  {
    fprintf(f_results, "Sum (force): ");
    position_Display(&force_sum_total, f_results, high);
    fprintf(f_results, "\n");
  }
}
/*********************************************************************************************
**********************************************************************************************
save
**********************************************************************************************
*********************************************************************************************/
/* Write all bodies of 'p_bodies' to 'results_filename' in the configured
   output format (FMB ASCII or NEMO; FMB binary is rejected because its
   header cannot be written here).
   FIX: the FMB_IO_Print_header() call read '¢er' — mojibake of '&center'
   from an encoding corruption ("&cen" -> "¢"). */
void Direct_method_Dump_bodies(char *results_filename,
                               unsigned long step_number_value,
                               bodies_t *p_bodies)
{
  bodies_it_t it;
  /* Initialize Output operations: */
  FMB_IO_InitO(results_filename);
  if (FMB_IO_Info.output_format != NEMO_format){
    /********** FMB file format: **********/
    if (FMB_IO_Info.output_format == FMB_binary_format){
      FMB_error("Unable to write the 'header' for FMB_binary_format in Direct_method_Dump_bodies(). \n");
    }
    /* The header records the GLOBAL body count (local count * nb_proc). */
    FMB_IO_Print_header(step_number_value, FALSE /* only_position_and_value */,
                        bodies_Nb_bodies(p_bodies) * nb_proc, &center, half_side);
    for (bodies_it_Initialize(&it, p_bodies);
         bodies_it_Is_valid(&it);
         bodies_it_Go2Next(&it)){
      FMB_IO_Print_body_from_bodies_it(&it, FALSE /* only_position_and_value */);
    } /* for */
  } /* if (FMB_IO_Info.output_format != NEMO_format) */
  else {
    /********** NEMO file format: **********/
    FMB_IO_Print_all_bodies_from_bodies_t(p_bodies);
  } /* else (FMB_IO_Info.output_format != NEMO_format) */
  /* Terminate Output operations: */
  FMB_IO_TerminateO();
}
|
bfs_csr_bsp.c | #include "graph_defs.h"
#include "prefetcher.h"
#include <limits.h>
/* Per-vertex BFS bookkeeping.
 * touched: 0 while undiscovered; otherwise the (1-based) BFS level at
 *          which the vertex was discovered (see bfs(): set to level+1).
 *          NOTE(review): stored in a char, so levels beyond CHAR_MAX
 *          would overflow -- confirm graph diameters stay small.
 * queue_next: next vertex in the intrusive prefetch queue; ULONG_MAX is
 *             the end-of-list sentinel (read concurrently, hence volatile). */
typedef struct bfs_metadata_st {
char touched;
volatile unsigned long queue_next;
} bfs_metadata_t;
/* Shared state between the BFS thread and the prefetcher callbacks.
 * queue_head / vertex_position are written by BFS and read by the
 * callbacks without locks, hence volatile. */
static volatile unsigned long queue_head = ULONG_MAX;
static volatile unsigned long vertex_position = 0;
/* Per-vertex metadata array; allocated and zeroed in main(). */
static bfs_metadata_t *metadata;
static csr_t * volatile graph;
/* Prefetch tuning knobs; overridable via CMAX / CMIN env vars in main(). */
unsigned long MAX_CACHE = ULONG_MAX;
long MIN_CACHE = 0;
/* Count of vertices discovered so far (updated inside bfs()). */
unsigned long visited = 0;
/* Prefetcher callback: starting from a resume point at/near the head of
 * the BFS work queue, fill the look-ahead buffer "laf" with the on-disk
 * pages holding the adjacency lists of up to "ift" queued vertices.
 *   laf[e]              = first page of entry e
 *   laf[e + 2*laf_size] = number of extra pages that entry spans
 * graph->index[] offsets are in bits, hence the (ASSUME_PAGE_SHIFT + 3)
 * shift converts a bit offset to a page number.
 * NOTE(review): queue_head / visited / metadata are read concurrently
 * with the BFS thread without synchronization -- presumably acceptable
 * for a best-effort prefetcher; verify. */
void prefetcher_random_callback(unsigned long *laf, unsigned long laf_size,
unsigned long ift) {
static unsigned long old_hoq = ULONG_MAX; /* resume point kept across calls */
unsigned long current_hoq = ULONG_MAX;
static unsigned long ra_depth = 0; /* read-ahead depth (only used by disabled code) */
static char preload = 0; /* only used by the disabled block below */
static long pf_visited = 0; /* BFS progress observed at last resync */
unsigned long entries = 0;
/* Fill in inner-loop entries from BFS queue */
/*
if ((preload == 0) && (ra_depth > MAX_CACHE)) {
preload = 1;
current_hoq = ULONG_MAX;
}
*/
current_hoq = old_hoq;
/* Resync to the live queue head when there is no resume point or the
 * prefetcher has run more than MIN_CACHE vertices ahead of the BFS. */
if ((current_hoq == ULONG_MAX)
|| (((signed long) (pf_visited - visited)) > MIN_CACHE)/*|| (ra_depth > MIN_CACHE)*/) {
current_hoq = queue_head;
pf_visited = visited;
// ra_depth = 0;
}
// if (((signed long)(pf_visited - visited)) > MIN_CACHE) return;
/* if(current_hoq != ULONG_MAX) {
current_hoq = metadata[current_hoq].queue_next;
}
*/
/* Walk the queue, one laf entry per vertex, until ift entries are
 * produced or the queue ends (ULONG_MAX sentinel). */
while (entries != ift && current_hoq != ULONG_MAX) {
unsigned long page = graph->index[current_hoq];
unsigned long end = graph->index[current_hoq + 1];
page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits ! */
end = end >> (ASSUME_PAGE_SHIFT + 3);
// if(laf[HASH_MODULO(page, laf_size)] != page) {
// laf[HASH_MODULO(page, laf_size)] = page;
// for (; page <= end; page++) {
// if (entries==ift) break;
laf[entries] = page;
if (end > page)
laf[entries + (2 * laf_size)] = end - page;
entries++;
// }
// }
old_hoq = current_hoq;
current_hoq = metadata[current_hoq].queue_next;
pf_visited++;
}
ra_depth += entries;
}
/* Sequential-prefetch hint: the page holding the adjacency data of the
 * vertex currently being scanned.  graph->index[] offsets are in bits,
 * so shift by ASSUME_PAGE_SHIFT + 3 to get a page number.  aux_offset is
 * part of the callback signature but unused here. */
unsigned long prefetcher_sequential_callback(unsigned long* aux_offset) {
	const unsigned long bit_offset = graph->index[vertex_position];
	return bit_offset >> (ASSUME_PAGE_SHIFT + 3);
}
/* Instrumentation counters, reported at program exit (see main()). */
unsigned long alist_entries_seen = 0;
// #pragma omp threadprivate(current_vertex)
unsigned long total_queue_demands = 0;
unsigned long queue_above_threshold = 0;
unsigned long queue_length = 0;
/* Level-synchronous (BSP) BFS over the CSR graph.  Cycles through vertex
 * ids starting at start_node, opening a new component whenever an
 * untouched vertex is found; each component is expanded one level per
 * parallel sweep.  Returns the number of components traversed, or 1 via
 * the early-exit path once a component's traversal time exceeds the
 * running maximum (giant-component shortcut).
 * NOTE(review): finished_flag is sized with omp_get_num_threads(), which
 * returns 1 outside a parallel region -- confirm this matches the number
 * of threads used inside the parallel for. */
static unsigned long bfs(csr_t *graph, unsigned long start_node) {
	unsigned long i;
	unsigned long components = 0;
	unsigned long queue_tail = ULONG_MAX;
	unsigned long nq_head = ULONG_MAX;
	unsigned long nq_tail = ULONG_MAX;
	char* finished_flag = malloc(sizeof(char) * omp_get_num_threads());
	/* fix: initialize id_giant so the final printf can't read garbage */
	unsigned long time_comp, time_giant = 0, id_giant = start_node;
	i = start_node;
	do {
		int level = 0;
		vertex_position = i;
		if (metadata[i].touched == 0) {
			CLOCK_START(time_comp);
			metadata[i].touched = 1;
			visited++;
			components++;
		} else {
			i++;
			if (i >= graph->vertex_cnt)
				i = 0;
			continue;
		}
		/* Expand the current component one BFS level per iteration. */
		while (1) {
			unsigned long current_vertex;
			level++;
			memset(finished_flag, 1, omp_get_num_threads());
#pragma omp parallel for
			for (current_vertex = 0; current_vertex < graph->vertex_cnt;
					current_vertex++) {
				if (metadata[current_vertex].touched != level)
					continue;
				csr_edge_iterator_t iter;
				csr_init_edge_iterator(graph, current_vertex, &iter);
				while (csr_iter_step(graph, &iter) == 0) {
					if (!iter.incoming) {
						unsigned long target = iter.neighbour;
#pragma omp critical (atomicset)
						{
							if (metadata[target].touched == 0) {
								metadata[target].touched = level + 1;
								finished_flag[omp_get_thread_num()] = 0;
								visited++;
							}
						}
					}
				}
			}
			int j;
			for (j = 0; j < omp_get_num_threads(); j++) {
				/* BUG FIX: was finished_flag[i] (the vertex index,
				   out of bounds); must test each thread's flag j. */
				if (finished_flag[j] == 0) {
					break;
				}
			}
			/* No thread discovered a new vertex: component done. */
			if (j == omp_get_num_threads()) break;
		}
		CLOCK_STOP(time_comp);
		if (time_comp > time_giant) {
			time_giant = time_comp;
			id_giant = i;
			printf("Visited %ld\n", visited);
			free(finished_flag); /* fix: don't leak on early exit */
			return 1;
		}
		i++;
		if (i >= graph->vertex_cnt) {
			i = 0;
		}
	} while (i != start_node);
	free(finished_flag); /* fix: release the per-thread flags */
	assert(visited == graph->vertex_cnt);
	printf("TIME GIANT COMP %lu\n", time_giant);
	printf("ID GIANT COMP %lu\n", id_giant);
	return components;
}
/* Entry point: load a CSR graph, run BFS from root_id, and report the
 * component count, timings and resource usage.
 * Usage: prog graph_name root_id
 * With PREFETCHER defined, CMAX/CMIN env vars tune the prefetch window
 * and a prefetch thread is run for the duration of the BFS. */
int main(int argc, char **argv) {
unsigned long time_bfs, time_total, components;
CLOCK_START(time_total);
if (argc < 3) {
fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]);
exit(-1);
}
#ifdef PREFETCHER
char *env_var;
env_var = getenv("CMAX");
if(env_var != NULL) {
MAX_CACHE = atol(env_var);
}
env_var = getenv("CMIN");
if(env_var != NULL) {
MIN_CACHE = atol(env_var);
}
bind_master();
init_prefetcher(prefetcher_random_callback,
NULL);
// prefetcher_sequential_callback);
#endif
graph = open_csr(argv[1]);
/* One metadata slot per vertex; zeroed below (touched=0, queue_next=0). */
metadata = (bfs_metadata_t*) map_anon_memory(
graph->vertex_cnt * sizeof(bfs_metadata_t), "vertex metadata");
//balloon_inflate(); /* Simulate semi-em conditions */
print_mlocked_memory();
unsigned long root_id = atol(argv[2]);
assert(root_id < graph->vertex_cnt);
/* Perhaps mmap /dev/null instead ? */
memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t));
#ifdef PREFETCHER
launch_prefetch_thread(graph->fd_calist);
#endif
struct rusage ru_begin;
getrusage(RUSAGE_SELF, &ru_begin);
CLOCK_START(time_bfs);
components = bfs(graph, root_id);
CLOCK_STOP(time_bfs);
struct rusage ru_end;
getrusage(RUSAGE_SELF, &ru_end);
#ifdef PREFETCHER
terminate_prefetch_thread();
destroy_prefetcher();
#endif
munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t));
close_csr(graph);
CLOCK_STOP(time_total);
printf("COMPONENTS %lu\n", components);
printf("TIME BFS %lu\n", time_bfs);
printf("TIME TOTAL %lu\n", time_total);
print_rusage_stats(stdout, &ru_begin, &ru_end);
/* NOTE(review): division is undefined/NaN when total_queue_demands==0. */
printf("F_THRESHOLD %f\n",
((double) queue_above_threshold) / total_queue_demands);
return 0;
}
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/magick.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/*
  Resize-filter state: the weighting function, its windowing function,
  their support sizes, the blur (x-axis scaling) factor and the cubic
  BC-spline coefficients.
*/
struct _ResizeFilter
{
  MagickRealType
    (*filter)(const MagickRealType,const ResizeFilter *),
    (*window)(const MagickRealType,const ResizeFilter *),
    support, /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale, /* dimension scaling to fit window support (usually 1.0) */
    blur, /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */

  size_t
    signature; /* structure validity marker */
};
/*
  Forward declarations.
*/
static MagickRealType
I0(MagickRealType x),
BesselOrderOne(MagickRealType),
Sinc(const MagickRealType, const ResizeFilter *),
SincFast(const MagickRealType, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealType FilterName(const MagickRealType x,
% const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman window: 0.42 + 0.5*cos(pi*x) + 0.08*cos(2*pi*x), reduced to a
    single trig call via cos(2t) = 2*cos(t)^2 - 1, which gives
    0.34 + 0.5*c + 0.16*c^2 with c = cos(pi*x).
    (Refactoring by Chantal Racette and Nicolas Robidoux.)
  */
  const MagickRealType c=cos((double) (MagickPI*x));
  return(0.34+c*(0.5+c*0.16));
}
static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman window: (1-x)*cos(pi*x) + sin(pi*x)/pi.
    The support is 1.0, so sin(pi*x) >= 0 and can be recovered from the
    cosine with one sqrt -- one trig call total (refactoring by Nicolas
    Robidoux).
  */
  const double c=cos((double) (MagickPI*x));
  const double s=sqrt(1.0-c*c);
  return((MagickRealType) ((1.0-x)*c+(1.0/MagickPI)*s));
}
static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Constant-weight (averaging) filter.  Deliberately NOT clipped to its
    support: point sampling relies on querying beyond the nominal 0.0
    support size.
  */
  return(1.0);
}
static MagickRealType Cosine(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
cos((pi/2)*x).
*/
return((MagickRealType)cos((double) (MagickPI2*x)));
}
static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter (each piece in Horner form below):
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  /* Outside the 2-unit support. */
  return(0.0);
}
static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is used
    as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However only the multiplier coeff[1] is needed; the others are
    informative only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann window: 0.5 + 0.5*cos(pi*x).
  */
  const MagickRealType c=cos((double) (MagickPI*x));
  return(0.5+0.5*c);
}
static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming (offset cosine) window: 0.54 + 0.46*cos(pi*x).
  */
  const MagickRealType c=cos((double) (MagickPI*x));
  return(0.54+0.46*c);
}
static MagickRealType Jinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Jinc function: J1(pi*x)/x, with the removable singularity at x == 0
    evaluated as pi/2.  Historically mislabelled "Bessel" (after Paul
    Heckbert's "zoom" program); see Pratt, "Digital Image Processing",
    p.97, and http://mathworld.wolfram.com/JincFunction.html.
  */
  if (x == 0.0)
    return((MagickRealType) (0.5*MagickPI));
  return(BesselOrderOne((MagickRealType) MagickPI*x)/x);
}
static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Kaiser (Bessel) window: I0(beta*sqrt(1-x^2)) / I0(beta).
    coefficient[0] holds beta, a free parameter typically 5..8 (default
    6.5, often given as alpha*PI).  coefficient[1] is the normalization
    1/I0(beta): not strictly required, but without it the window peaks
    well above 1 at x=0, making comparison with other windows awkward.
  */
  const double r=sqrt((double) (1.0-x*x));
  return(resize_filter->coefficient[1]*
    I0(resize_filter->coefficient[0]*r));
}
static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  MagickRealType
    product;

  register ssize_t
    k;

  ssize_t
    piece,
    pieces;

  /*
    Lagrange piecewise polynomial fit of sinc.  The number of pieces is
    twice the window support: a support of 2 yields Lagrange-4, a
    piecewise cubic.  "piece" selects which polynomial of the piecewise
    fit applies at this x.
    See Survey: Interpolation Methods, IEEE Transactions on Medical
    Imaging, Vol 18, No 11, November 1999, p1049-1075 -- Equation 27 on
    p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  pieces=(ssize_t) (2.0*resize_filter->window_support);
  piece=(ssize_t) (resize_filter->window_support+x);
  product=1.0f;
  for (k=0; k < pieces; k++)
  {
    if (k == piece)
      continue;
    product*=(piece-k-x)/(piece-k);
  }
  return(product);
}
static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const MagickRealType d=x-1.5;
      return(0.5*d*d);
    }
  return(0.0);
}
static MagickRealType Sinc(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Scaled sinc(x) function using a trig call:
sinc(x) == sin(pi x)/(pi x).
*/
if (x != 0.0)
{
const MagickRealType alpha=(MagickRealType) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
return((MagickRealType) 1.0);
}
static MagickRealType SincFast(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  /* Outside [-4,4]: fall back to the exact one-trig-call formula. */
  if (x > 4.0)
    {
      const MagickRealType alpha=(MagickRealType) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The approximation order below is matched to the pixel precision
      selected at build time (MAGICKCORE_QUANTUM_DEPTH).
    */
    const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((MagickRealType) ((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p));
#endif
  }
}
static MagickRealType Triangle(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function
for Sinc().
*/
if (x < 1.0)
return(1.0-x);
return(0.0);
}
static MagickRealType Welsh(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Welsh parabolic windowing filter.
*/
if (x < 1.0)
return(1.0-x*x);
return(0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special a 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for images distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux, some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, a non-windowing
% function is not advisable. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these are given it is assumes to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 10 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterTypes filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterTypes filter,const MagickRealType blur,
const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
const char
*artifact;
FilterTypes
filter_type,
window_type;
MagickRealType
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table Mapping given Filter, into Weighting and Windowing functions.
A 'Box' windowing function means it is a simple non-windowed filter.
An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
"cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterTypes
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterTypes
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HanningFilter }, /* Hanning -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelshFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
};
/*
Table mapping the filter/window from the above table to an actual function.
The default support size for that filter as a weighting function, the range
to scale with to use that function as a sinc windowing function, (typ 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
MagickRealType
(*function)(const MagickRealType,const ResizeFilter*);
double
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0 }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0 }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0 }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0 }, /* Hermite (cubic B=C=0) */
{ Hanning, 1.0, 1.0, 0.0, 0.0 }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0 }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0 }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0 }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0 }, /* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5 }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3. }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0 }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0 }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0 }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0 }, /* Kaiser (square root window) */
{ Welsh, 1.0, 1.0, 0.0, 0.0 }, /* Welsh (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0 }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0 }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0 }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0 }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067 },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929 },
{ Cosine, 1.0, 1.0, 0.0, 0.0 }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0 }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0 }, /* Lanczos, Interger Radius */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
if (resize_filter == (ResizeFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur = blur; /* function argument blur factor (1.0) */
/* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
if (cylindrical != MagickFalse && filter_type == SincFastFilter
&& filter != SincFastFilter )
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterTypes) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterTypes) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type=cylindrical != MagickFalse ?
JincFilter : SincFastFilter;
window_type=(FilterTypes) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->window=filters[window_type].function;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(MagickRealType) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remain unchanged */
break;
default:
break;
}
/* Global Sharpening (regardless of orthogonal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= (MagickRealType) 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= (MagickRealType) 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* guassian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficients for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= value/0.5; /* increase support */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=(MagickRealType) (StringToDouble(artifact,(char **) NULL)*MagickPI);
/* Define coefficents for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(MagickRealType) lobes;
}
/* Convert a Jinc function lobes value to a real support value */
if (resize_filter->filter == Jinc)
{
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long)resize_filter->support)-1];
/* blur this filter so support is an integer value (lobes dependent) */
if (filter_type == LanczosRadiusFilter)
{
resize_filter->blur *= floor(resize_filter->support)/
resize_filter->support;
}
}
/* Expert Blur Override */
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(MagickRealType) MagickEpsilon;
/* Expert override of the support setting */
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale windowing function separately to the support 'clipping'
window that calling operator is planning to actually use. (Expert
override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for
weighting function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
* Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
/* Convert B,C values into Cubic Coefficents. See CubicBC(). */
{
const double twoB = B+B;
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
artifact=GetImageArtifact(image,"filter:verbose");
if (IsMagickTrue(artifact))
{
double
support,
x;
/*
Set the weighting function properly when the weighting
function may not exactly match the filter of the same name.
E.g. a Point filter really uses a Box weighting function
with a different support than is typically used.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(), (double)resize_filter->blur);
if ( filter_type == GaussianFilter || window_type == GaussianFilter )
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(), (double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),
(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing
filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,GetMagickPrecision(),
(double) GetResizeFilterWeight(resize_filter,x));
/* A final value so gnuplot can graph the 'stop' properly. */
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Shortcut for a fast interpolative resize using mesh interpolation;
    delegates directly to InterpolativeResizeImage().
  */
  Image
    *resized_image;

  resized_image=InterpolativeResizeImage(image,columns,rows,
    MeshInterpolatePixel,exception);
  return(resized_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 0. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*j1(x);
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% MagickRealType BesselOrderOne(MagickRealType x)
%
% A description of each parameter follows:
%
% o x: MagickRealType value.
%
*/
#undef I0
static MagickRealType I0(MagickRealType x)
{
  /*
    Zeroth-order modified Bessel function of the first kind, computed by
    summing its power series until a term falls below MagickEpsilon.
  */
  MagickRealType
    quarter_x_squared,
    sum,
    term;

  ssize_t
    n;

  quarter_x_squared=x*x/4.0;
  sum=1.0;
  term=quarter_x_squared;
  n=2;
  while (term > MagickEpsilon)
  {
    sum+=term;
    term*=quarter_x_squared/((MagickRealType) n*n);
    n++;
  }
  return(sum);
}
#undef J1
static MagickRealType J1(MagickRealType x)
{
  /*
    Rational polynomial-ratio approximation used for the small-argument
    branch of BesselOrderOne(); both polynomials are evaluated with
    Horner's rule in powers of x*x.
  */
  static const double
    numerator_coefficients[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    denominator_coefficients[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  MagickRealType
    den,
    num;

  ssize_t
    k;

  num=numerator_coefficients[8];
  den=denominator_coefficients[8];
  for (k=7; k >= 0; k--)
  {
    num=num*x*x+numerator_coefficients[k];
    den=den*x*x+denominator_coefficients[k];
  }
  return(num/den);
}
#undef P1
static MagickRealType P1(MagickRealType x)
{
  /*
    Rational approximation for the asymptotic (large-x) expansion term P1,
    evaluated by Horner's rule in powers of (8/x)^2.  The multiply order
    num*r*r matches the original p*(8.0/x)*(8.0/x) exactly, so the
    floating-point result is unchanged.
  */
  static const double
    p_coefficients[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    q_coefficients[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  MagickRealType
    den,
    num,
    reciprocal;

  ssize_t
    k;

  reciprocal=8.0/x;
  num=p_coefficients[5];
  den=q_coefficients[5];
  for (k=4; k >= 0; k--)
  {
    num=num*reciprocal*reciprocal+p_coefficients[k];
    den=den*reciprocal*reciprocal+q_coefficients[k];
  }
  return(num/den);
}
#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  /*
    Rational approximation for the asymptotic (large-x) expansion term Q1,
    evaluated by Horner's rule in powers of (8/x)^2.  The multiply order
    num*r*r matches the original p*(8.0/x)*(8.0/x) exactly, so the
    floating-point result is unchanged.
  */
  static const double
    p_coefficients[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    q_coefficients[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  MagickRealType
    den,
    num,
    reciprocal;

  ssize_t
    k;

  reciprocal=8.0/x;
  num=p_coefficients[5];
  den=q_coefficients[5];
  for (k=4; k >= 0; k--)
  {
    num=num*reciprocal*reciprocal+p_coefficients[k];
    den=den*reciprocal*reciprocal+q_coefficients[k];
  }
  return(num/den);
}
static MagickRealType BesselOrderOne(MagickRealType x)
{
  /*
    Bessel function of the first kind, order one.  Uses the rational
    series j1(x) = x*J1(x) for |x| < 8, and the asymptotic expansion
    with P1()/Q1() otherwise.  Since j1(-x) == -j1(x), the computation
    runs on |x| and the sign of the input is restored at the end.
  */
  MagickRealType
    result,
    signed_x;

  if (x == 0.0)
    return(0.0);
  signed_x=x;  /* remember the signed input for the small-argument branch */
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(signed_x*J1(x));
  result=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*
    (sin((double) x)-cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*
    (sin((double) x)+cos((double) x))));
  if (signed_x < 0.0)
    result=(-result);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature so stale pointers are caught by asserts,
    then release the filter's memory.  Always returns NULL.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  resize_filter->signature=(~MagickSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  /*
    Practical support window: the filter's base support enlarged by the
    blur factor (see "filter:blur").
  */
  MagickRealType
    practical_support;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  practical_support=resize_filter->support*resize_filter->blur;
  return(practical_support);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const MagickRealType x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  /*
    Evaluate the windowed filter at offset x: the weighting function is
    scaled by the windowing function, both sampled at the blur-adjusted
    offset.
  */
  MagickRealType
    window_weight,
    x_scaled;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* Apply the blur factor to the (absolute) sample offset. */
  x_scaled=fabs((double) x)/resize_filter->blur;
  if ((resize_filter->window == Box) ||
      (resize_filter->window_support < MagickEpsilon))
    window_weight=1.0;  /* Point/Box window -- avoid division by zero */
  else
    window_weight=resize_filter->window(x_scaled*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(x_scaled,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const InterpolatePixelMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const InterpolatePixelMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image: every destination pixel is produced by
    sampling the source image at the corresponding (scaled) location with
    the requested pixel-interpolation method.  Returns a new image, or
    NULL on failure (errors are reported via 'exception').
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Map destination pixel centers back into source coordinates. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    PointInfo
      offset;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        /* BUGFIX: a pixel-cache failure previously left 'status' true,
           silently yielding a partially-initialized image.  Fail instead
           (consistent with MagnifyImage). */
        status=MagickFalse;
        continue;
      }
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    GetMagickPixelPacket(image,&pixel);
    offset.y=((MagickRealType) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) x+0.5)*scale.x-0.5;
      (void) InterpolateMagickPixelPacket(image,image_view,method,offset.x,
        offset.y,&pixel,exception);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      {
        /* BUGFIX: a failed writeback now fails the operation instead of
           being silently ignored. */
        status=MagickFalse;
        continue;
      }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        /* BUGFIX: progress increments once per destination row, so the
           total must be resize_image->rows (was image->rows). */
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          resize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,
% const size_t columns,const size_t rows,
% const double delta_x,const double rigidity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *rescale_view;

  const char
    *map;

  guchar
    *packet;

  Image
    *rescale_image;

  int
    x,
    y;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  unsigned char
    *pixels;

  /*
    Liquid rescale (seam carving) via liblqr.  Exports the image pixels
    into a packed char buffer, hands them to a carver, and re-imports the
    carved result pixel-by-pixel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
  if ((columns >= (2*image->columns)) || (rows >= (2*image->rows)))
    {
      Image
        *resize_image;

      size_t
        height,
        width;

      /*
        Honor liquid resize size limitations: liblqr cannot grow by 2x or
        more in one pass, so pre-resize conventionally, then recurse.
      */
      for (width=image->columns; columns >= (2*width-1); width*=2);
      for (height=image->rows; rows >= (2*height-1); height*=2);
      resize_image=ResizeImage(image,width,height,image->filter,image->blur,
        exception);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x,
        rigidity,exception);
      resize_image=DestroyImage(resize_image);
      return(rescale_image);
    }
  /*
    BUGFIX: the matte tests were inverted (== MagickFalse): an alpha
    channel was exported only for images WITHOUT a matte, while images
    WITH a matte lost their transparency.  Export alpha when matte is set.
  */
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows*
    strlen(map)*sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    return((Image *) NULL);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,columns,rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      /* BUGFIX: the carver owns 'pixels' once lqr_carver_new() succeeds;
         destroying the carver releases both (freeing 'pixels' here leaked
         the carver and left it with a dangling buffer). */
      lqr_carver_destroy(carver);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&rescale_image->exception);
      rescale_image=DestroyImage(rescale_image);
      lqr_carver_destroy(carver);  /* BUGFIX: carver (and pixels) leaked here */
      return((Image *) NULL);
    }
  GetMagickPixelPacket(rescale_image,&pixel);
  (void) lqr_carver_scan_reset(carver);
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
  {
    register IndexPacket
      *restrict rescale_indexes;

    register PixelPacket
      *restrict q;

    q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view);
    pixel.red=QuantumRange*(packet[0]/255.0);
    pixel.green=QuantumRange*(packet[1]/255.0);
    pixel.blue=QuantumRange*(packet[2]/255.0);
    if (image->colorspace != CMYKColorspace)
      {
        /*
          BUGFIX: read the alpha packet only when it was exported (matte
          set), and convert alpha (255 == opaque) to IM6 opacity
          (0 == opaque) by inverting.
        */
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange-QuantumRange*(packet[3]/255.0);
      }
    else
      {
        pixel.index=QuantumRange*(packet[3]/255.0);
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange-QuantumRange*(packet[4]/255.0);
      }
    SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  /*
    Relinquish resources (lqr_carver_destroy also frees 'pixels').
  */
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  Stub used when ImageMagick is built without the liblqr delegate
  (MAGICKCORE_LQR_DELEGATE undefined): validates its arguments, raises a
  MissingDelegateError on the supplied exception, and returns NULL.
  The magick_unused() decorations suppress unused-parameter warnings for
  the arguments the stub never reads.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
const size_t magick_unused(columns),const size_t magick_unused(rows),
const double magick_unused(delta_x),const double magick_unused(rigidity),
ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* Report that LQR support was not compiled in; no image is produced. */
(void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
"DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The result is exactly twice the source size in each dimension. */
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict magnify_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Each source row y writes a 2-row band (rows 2*y and 2*y+1). */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    magnify_indexes=GetCacheViewAuthenticIndexQueue(magnify_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      register PixelPacket
        *restrict r;

      register ssize_t
        i;

      /*
        Magnify this row of pixels.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      /* Intensities of the 3x3 neighborhood; p[4] is the center pixel. */
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i);
      r=q;
      /*
        r walks the 2x2 destination quad: +1 moves right, then
        +(columns-1) drops to the start of the quad's second row.
      */
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel.
          */
          *r=p[4];
          r++;
          *r=p[4];
          r+=(magnify_image->columns-1);
          *r=p[4];
          r++;
          *r=p[4];
        }
      else
        {
          /*
            Selectively clone pixel.
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            *r=p[3];
          else
            *r=p[4];
          r++;
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            *r=p[5];
          else
            *r=p[4];
          r+=(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            *r=p[3];
          else
            *r=p[4];
          r++;
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            *r=p[5];
          else
            *r=p[4];
        }
      if (indexes != (const IndexPacket *) NULL)
        {
          register IndexPacket
            *r;

          /*
            Magnify the colormap indexes.
          */
          r=magnify_indexes;
          if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
              (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
            {
              /*
                Clone center pixel.
              */
              *r=indexes[4];
              r++;
              *r=indexes[4];
              r+=(magnify_image->columns-1);
              *r=indexes[4];
              r++;
              *r=indexes[4];
            }
          else
            {
              /*
                Selectively clone pixel.
              */
              if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
                *r=indexes[3];
              else
                *r=indexes[4];
              r++;
              if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
                *r=indexes[5];
              else
                *r=indexes[4];
              r+=(magnify_image->columns-1);
              if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
                *r=indexes[3];
              else
                *r=indexes[4];
              r++;
              if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
                *r=indexes[5];
              else
                *r=indexes[4];
            }
          magnify_indexes+=2;
        }
      q+=2;
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: proportionally scale the image to half of its
    current width and height using a spline filter.
  */
  Image
    *half_size_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  half_size_image=ResizeImage(image,image->columns/2,image->rows/2,
    SplineFilter,1.0,exception);
  return(half_size_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  double
    source_x_density,
    source_y_density;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Compute the pixel geometry that keeps the real-world size constant at
    the requested resolution; an unset source resolution defaults to 72 DPI.
  */
  source_x_density=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  source_y_density=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  width=(size_t) (x_resolution*image->columns/source_x_density+0.5);
  height=(size_t) (y_resolution*image->rows/source_y_density+0.5);
  resample_image=ResizeImage(image,width,height,filter,blur,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, a image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,
% const size_t rows,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp. Typically set
% this to 1.0.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One filter tap: the weight applied to the source pixel at the given
  column (HorizontalFilter) or row (VerticalFilter) coordinate.
*/
typedef struct _ContributionInfo
{
  MagickRealType
    weight;  /* resize-filter weight for this source pixel */

  ssize_t
    pixel;   /* absolute source column/row index */
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  /*
    Release the per-thread contribution buffers and the pointer table
    itself.  NULL slots (left by a partially-completed acquire) are
    skipped.  Always returns NULL for convenient assignment.
  */
  ssize_t
    i,
    number_threads;

  assert(contribution != (ContributionInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  /*
    Allocate one aligned buffer of 'count' ContributionInfo entries per
    worker thread.  On any failure the partial set is destroyed and NULL
    is returned.
  */
  ContributionInfo
    **contribution;

  size_t
    i,
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /* Zero the table first so a failed acquire can be cleaned up safely. */
  (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
static inline double MagickMax(const double x,const double y)
{
  /* Larger of x and y; returns y when the operands compare equal or
     unordered (NaN), matching the original if/return form. */
  return(x > y ? x : y);
}
static inline double MagickMin(const double x,const double y)
{
  /* Smaller of x and y; returns y when the operands compare equal or
     unordered (NaN), matching the original if/return form. */
  return(x < y ? x : y);
}
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
    x_factor is destination/source width; span and offset drive the shared
    progress monitor across the two resize passes.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  /* Worst-case taps per destination column: 2*support plus slack. */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    MagickRealType
      bisect,
      density;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ContributionInfo
      *restrict contribution;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map destination column x back to the source and clamp the filter
      window [start,stop) to the image bounds.
    */
    bisect=(MagickRealType) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[GetOpenMPThreadId()];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the full-height source strip covering the filter window and
       queue the single destination column. */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register ssize_t
        i;

      ssize_t
        j;

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          /* Opaque path: plain weighted sum of the window pixels. */
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight;
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=alpha*GetPixelOpacity(p+j);
          }
          SetPixelRed(q,ClampToQuantum(pixel.red));
          SetPixelGreen(q,ClampToQuantum(pixel.green));
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight;
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+y,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          double
            gamma;

          /*
            Matte path: alpha-weighted (associated alpha) blend; gamma
            renormalizes by the accumulated alpha.
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
            gamma+=alpha;
          }
          gamma=PerceptibleReciprocal(gamma);
          SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*pixel.index));
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /* Colormapped images copy the index of the nearest source pixel. */
          i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
            1.0)+0.5);
          j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[i-start].pixel-contribution[0].pixel);
          SetPixelIndex(resize_indexes+y,GetPixelIndex(indexes+j));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
    Mirror of HorizontalFilter with the roles of rows and columns swapped;
    y_factor is destination/source height.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  /* Worst-case taps per destination row: 2*support plus slack. */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickRealType
      bisect,
      density;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ContributionInfo
      *restrict contribution;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map destination row y back to the source and clamp the filter
      window [start,stop) to the image bounds.
    */
    bisect=(MagickRealType) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[GetOpenMPThreadId()];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Read the full-width source band covering the filter window and
       queue the single destination row. */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register ssize_t
        i;

      ssize_t
        j;

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          /* Opaque path: plain weighted sum of the window pixels. */
          for (i=0; i < n; i++)
          {
            j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight;
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=alpha*GetPixelOpacity(p+j);
          }
          SetPixelRed(q,ClampToQuantum(pixel.red));
          SetPixelGreen(q,ClampToQuantum(pixel.green));
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight;
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+x,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          double
            gamma;

          /*
            Matte path: alpha-weighted (associated alpha) blend; gamma
            renormalizes by the accumulated alpha.
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
            gamma+=alpha;
          }
          gamma=PerceptibleReciprocal(gamma);
          SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight*QuantumScale*GetPixelAlpha(p+j);
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /* Colormapped images copy the index of the nearest source pixel. */
          i=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-
            1.0)+0.5);
          j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)*
            image->columns+x);
          SetPixelIndex(resize_indexes+x,GetPixelIndex(indexes+j));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_VerticalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
  FilterTypes
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickRealType
    x_factor,
    y_factor;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* No-op resize with default filter settings: just clone. */
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter) && (blur == 1.0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return(resize_image);
  /*
    Acquire resize filter.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  /*
    Intermediate image for the two-pass resize: the dimension with the
    larger scale factor is resized first.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return(DestroyImage(resize_image));
  /*
    Default filter selection: Lanczos, except Point for a pure no-scale
    pass and Mitchell for colormapped, matte, or enlarged images.
  */
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse,
    exception);
  /*
    Resize image.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      /* span = total progress units across both passes. */
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Check for possible user defined sampling offset Artifact
    The default sampling offset is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /* rho is the x offset (percent); sigma, if given, the y offset. */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute the source column for every destination column. */
  for (x=0; x < (ssize_t) sample_image->columns; x++)
    x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sample_image,1,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict sample_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    /* Source row corresponding to destination row y. */
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* image_view is a virtual view: fetch its virtual index queue. */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view);
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
      *q++=p[x_offset[x]];
    if ((image->storage_class == PseudoClass) ||
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) sample_image->columns; x++)
        SetPixelIndex(sample_indexes+x,GetPixelIndex(indexes+x_offset[x]));
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SampleImage)
#endif
        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ScaleImage() resizes via simple pixel-area resampling: fractions of source
  rows are accumulated into y_vector, and the blended scanline is then
  accumulated across columns into scale_scanline.  No filter kernel is used,
  so it is faster (but blockier) than ResizeImage().

  Fix over previous revision: on a partial allocation failure the buffers
  that *were* acquired are now released before throwing (they used to leak).
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  MagickRealType
    alpha;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When no vertical scaling is needed scanline simply
    aliases x_vector; otherwise it is a distinct blend buffer.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
      sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      /*
        Release any buffers acquired before the failure (previously leaked).
        Take care not to free the x_vector/scanline alias twice.
      */
      if ((scanline != (MagickPixelPacket *) NULL) && (scanline != x_vector))
        scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (MagickPixelPacket *) NULL)
        scale_scanline=(MagickPixelPacket *)
          RelinquishMagickMemory(scale_scanline);
      if (x_vector != (MagickPixelPacket *) NULL)
        x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
      if (y_vector != (MagickPixelPacket *) NULL)
        y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict scale_indexes;

    register MagickPixelPacket
      *restrict s,
      *restrict t;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
          exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            break;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(p);
          x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
          x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
          x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x));
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction: accumulate whole source rows (weight scale.y)
          into y_vector until the output row's span is covered.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
                exception);
              if (p == (const PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (image->matte != MagickFalse)
                  alpha=QuantumScale*GetPixelAlpha(p);
                x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
                x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
                x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) (alpha*
                    GetPixelIndex(indexes+x));
                p++;
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
              exception);
            if (p == (const PixelPacket *) NULL)
              {
                status=MagickFalse;
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte != MagickFalse)
                alpha=QuantumScale*GetPixelAlpha(p);
              x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
              x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
              x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) (alpha*
                  GetPixelIndex(indexes+x));
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Blend the accumulated rows plus the fractional last row (span.y)
          into scanline, then reset the accumulator.
        */
        s=scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        s=scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-s->opacity);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*s->red));
          SetPixelGreen(q,ClampToQuantum(alpha*s->green));
          SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(s->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction: same span/weight accumulation as above, but
          across columns of the blended scanline.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-t->opacity);
          alpha=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(alpha*t->red));
          SetPixelGreen(q,ClampToQuantum(alpha*t->green));
          SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(t->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
          t++;
          q++;
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ThumbnailImage(): resize quickly for thumbnails, strip non-color profiles,
  and attach the freedesktop.org Thumb::* properties.

  Fix over previous revision: attributes.st_mtime was also formatted outside
  the GetPathAttributes() success branch, reading a possibly-uninitialized
  struct stat; the result was dead anyway (immediately overwritten by
  FormatMagickSize()).  That duplicate call has been removed.
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  size_t
    version;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        /*
          Large shrink factor: pre-shrink with cheap point sampling, then
          filter the small intermediate down to the requested size.
        */
        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        /*
          Deleting invalidates the iterator, so restart it.
        */
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Attach freedesktop.org thumbnail metadata.
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  /* NOTE(review): lower-case "height" is inconsistent with "Width" above but
     is kept as-is since external consumers may match this exact key. */
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
|
ntlmv1_mschapv2_fmt_plug.c | /*
* Previous files MSCHAPv2_fmt_plug.c and NETNTLM_fmt_plug.c now merged into
* this one file, sharing functions.
*
* NETNTLM_fmt.c -- NTLM Challenge/Response
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* This algorithm is designed for performing brute-force cracking of the NTLM
* (version 1) challenge/response pairs exchanged during network-based
* authentication attempts [1]. The captured challenge/response pairs from these
* attempts should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1:
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
 * It should be noted that an NTLM authentication response is not the same as
 * an NTLM password hash, which can be extracted using tools such as FgDump [2]. NTLM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
* This version supports Extended Session Security. This is what
* is used when the "LM" hash ends in 32 zeros:
*
* DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000:
* abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4
*
* MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2
* Written by JoMo-Kun <jmk at foofus.net> in 2010
* and placed in the public domain.
*
* Support for freeradius-wep-patch challenge/response format
* added by Linus Lüssing in 2012 and is licensed under CC0/PD terms:
* To the extent possible under law, Linus Lüssing has waived all copyright
* and related or neighboring rights to this work. This work is published from:
* Germany.
*
*
* This algorithm is designed for performing brute-force cracking of the
* MSCHAPv2 challenge/response sets exchanged during network-based
* authentication attempts. The captured challenge/response set from these
* attempts should be stored using the following format:
*
* USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE:
*
* For example:
* User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E
* domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc
*
* http://freeradius.org/rfc/rfc2759.txt
*
* Modified for performance and support for SSE2, NTLMv1 ESS, OMP and UTF-8, by
* magnum 2010-2011 and 2013.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MSCHAPv2_new;
extern struct fmt_main fmt_NETNTLM_new;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MSCHAPv2_new);
john_register_one(&fmt_NETNTLM_new);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "simd-intrinsics.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4)
#else
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#include <omp.h>
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "memory.h"
#include "sha.h"
#include "md4.h"
#include "md5.h"
#include "unicode.h"
#include "john.h"
#include "memdbg.h"
extern volatile int bench_running;
#ifndef uchar
#define uchar unsigned char
#endif
#define CHAP_FORMAT_LABEL "MSCHAPv2"
#define CHAP_FORMAT_NAME "C/R"
#define FORMAT_TAG "$MSCHAPv2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAGN "$NETNTLM$"
#define FORMAT_TAGN_LEN (sizeof(FORMAT_TAGN)-1)
#define CHAP_USERNAME_LENGTH 256
#define CHAP_CHALLENGE_LENGTH 64
#define CHAP_TOTAL_LENGTH 13 + CHAP_USERNAME_LENGTH + CHAP_CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
#define NTLM_FORMAT_LABEL "netntlm"
#define NTLM_FORMAT_NAME "NTLMv1 C/R"
#define NTLM_TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define ALGORITHM_NAME "MD4 DES (ESS MD5) " MD4_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define FULL_BINARY_SIZE (2 + 8 * 3)
#define BINARY_SIZE (2 + 8)
#define BINARY_ALIGN 2
#define SALT_SIZE 8
#define SALT_ALIGN MEM_ALIGN_WORD
#define CIPHERTEXT_LENGTH 48
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 27
//#define SSE_OMP
#if defined (_OPENMP) && defined(SSE_OMP)
#define BLOCK_LOOPS (2048 / NBKEYS)
#else
#define BLOCK_LOOPS (1024 / NBKEYS)
#endif
#define MIN_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
#define MAX_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32*4 )
#else
#define PLAINTEXT_LENGTH 64
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 2048
#endif
#ifdef SIMD_COEF_32
/* Interleaved key buffer; layout is dictated by GETPOS()/GETOUTPOS(). */
static unsigned char *saved_key;
#else
/* Per-candidate UTF-16 plaintexts and their lengths (scalar path). */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
#endif
static unsigned short (*crypt_key);  /* per-candidate 16-bit partial results (BINARY_SIZE starts with 2 bytes) */
static unsigned char *nthash;        /* per-candidate hash buffer -- presumably the MD4/NT hash; computed in crypt_all (not shown here) */
static uint32_t *bitmap;             /* optional bitmap over crypt_key values -- TODO confirm usage in cmp_all (not shown) */
static int cmps_per_crypt, use_bitmap; /* heuristic counters controlling the bitmap path */
static int valid_i, valid_j;         /* cached key-byte pair that last satisfied chap_valid()'s 3rd-block check */
static uchar *challenge;             /* current 8-byte challenge (the salt) */
static int keys_prepared;            /* lazy flag: key batch still needs hashing */
static struct fmt_main *my;          /* the fmt_main (MSCHAPv2 or NETNTLM) currently in use */
static char *chap_long_to_short(char *orig); /* used to canonicalize the MSCHAPv2 format */
/*
 * MSCHAPv2 self-test vectors.  Both the one-line "$MSCHAPv2$..." encoding and
 * the split-field form (username / domain / challenge / response / peer
 * challenge) are exercised; prepare() is expected to join the latter into the
 * former, and long forms are canonicalized to the short form.
 */
static struct fmt_tests chap_tests[] = {
	{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
	{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
	{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
	{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
#if PLAINTEXT_LENGTH >= 35
	{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
#endif
	{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
	/* Ettercap generated three test vectors */
	{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
	{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
	{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
	/* Single test vector from chapcrack's sample pcap file */
	{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
	{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
	{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
	{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
	{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
	{NULL}
};
/*
 * NETNTLM (NTLMv1) self-test vectors, in both one-line "$NETNTLM$..." and
 * split-field form.  Entries with a 32-hex-digit challenge correspond to the
 * Extended Session Security variant described in the file header.
 */
static struct fmt_tests ntlm_tests[] = {
	{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
#ifndef SIMD_COEF_32 /* exceeds max length for SSE */
	{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
#endif
	{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
	{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
	{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
	{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
	{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
	{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
	{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
	{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
	{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
	{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
	{NULL}
};
/*
 * Expand a 56-bit (7-byte) key into the 64-bit DES key layout: each output
 * byte takes 7 key bits, leaving the low (parity) bit clear/ignored.
 */
inline static void setup_des_key(uchar key_56[], DES_key_schedule *ks)
{
	DES_cblock key;
	int n;

	key[0] = key_56[0];
	for (n = 1; n < 7; n++)
		key[n] = (key_56[n - 1] << (8 - n)) | (key_56[n] >> n);
	key[7] = key_56[6] << 1;
	DES_set_key(&key, ks);
}
/*
 * Syntactic check of the long MSCHAPv2 form:
 * $MSCHAPv2$<auth challenge hex>$<response hex>$<peer challenge hex>$<user>
 * Returns 1 when every field has the right length and hex alphabet.
 */
static int chap_valid_long(char *ciphertext)
{
	char *field, *p;

	if (ciphertext == NULL)
		return 0;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
		return 0;
	/* Authenticator/Server challenge: CHAP_CHALLENGE_LENGTH/2 hex digits */
	field = &ciphertext[FORMAT_TAG_LEN];
	for (p = field; *p != '$'; p++)
		if (atoi16[ARCH_INDEX(*p)] == 0x7F)
			return 0;
	if (!*p || p - field != CHAP_CHALLENGE_LENGTH / 2)
		return 0;
	/* MSCHAPv2 response: CIPHERTEXT_LENGTH hex digits */
	field = ++p;
	while (*p != '$') {
		if (atoi16[ARCH_INDEX(*p)] == 0x7F)
			return 0;
		p++;
	}
	if (!*p || p - field != CIPHERTEXT_LENGTH)
		return 0;
	/* Peer/Client challenge: CHAP_CHALLENGE_LENGTH/2 hex digits */
	field = ++p;
	while (*p != '$') {
		if (atoi16[ARCH_INDEX(*p)] == 0x7F)
			return 0;
		p++;
	}
	if (!*p || p - field != CHAP_CHALLENGE_LENGTH / 2)
		return 0;
	/* Username */
	if (strlen(++p) > CHAP_USERNAME_LENGTH)
		return 0;
	return 1;
}
/*
 * Syntactic check of the short (canonical) MSCHAPv2 form:
 * $MSCHAPv2$<challenge hex>$<response hex>$...
 * Returns 1 when the challenge and response fields have the right length and
 * hex alphabet.
 */
static int chap_valid_short(char *ciphertext)
{
	char *field, *p;

	if (ciphertext == NULL)
		return 0;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
		return 0;
	/* MSCHAPv2 challenge: CHAP_CHALLENGE_LENGTH/4 hex digits */
	field = &ciphertext[FORMAT_TAG_LEN];
	for (p = field; *p != '$'; p++)
		if (atoi16[ARCH_INDEX(*p)] == 0x7F)
			return 0;
	if (!*p || p - field != CHAP_CHALLENGE_LENGTH / 4)
		return 0;
	/* MSCHAPv2 response: CIPHERTEXT_LENGTH hex digits */
	field = ++p;
	while (*p != '$') {
		if (atoi16[ARCH_INDEX(*p)] == 0x7F)
			return 0;
		p++;
	}
	if (!*p || p - field != CIPHERTEXT_LENGTH)
		return 0;
	return 1;
}
/*
 * Decode the SALT_SIZE-byte challenge from the hex digits immediately
 * following the $MSCHAPv2$ tag into binary_salt.
 */
static void chap_get_challenge(const char *ciphertext,
                               unsigned char *binary_salt)
{
	const char *hex = ciphertext + FORMAT_TAG_LEN;
	int byte;

	for (byte = 0; byte < SALT_SIZE; byte++)
		binary_salt[byte] =
			(atoi16[ARCH_INDEX(hex[2 * byte])] << 4) +
			atoi16[ARCH_INDEX(hex[2 * byte + 1])];
}
/* Either the ciphertext already contains the MSCHAPv2 Challenge (8 bytes) or
   we are going to calculate it via:
   sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge (8 Bytes)|Username (<=256)|)

   NOTE, we now ONLY call this function for the short form. The long form gets converted into the short
   form in either the prepare or split function. The short form is the canonical form (change made July, 2014, JimF)
*/
/*
 * Return the SALT_SIZE-byte challenge (the salt) decoded from a canonical
 * short-form ciphertext.  The buffer is a lazily-allocated static, so the
 * result is only valid until the next call.
 *
 * Cleanup: a leftover `digest[20]` local (zeroed but never read) has been
 * removed -- the SHA-1 derivation now lives in chap_long_to_short().
 */
static void *chap_get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	/* This is just to silence scan-build. It will never happen.
	   It is unclear why only this format gave warnings, many others do
	   similar things. */
	if (!ciphertext)
		return ciphertext;
	memset(binary_salt, 0, SALT_SIZE);
	chap_get_challenge(ciphertext, binary_salt);
	return (void*)binary_salt;
}
/*
 * This function will convert long hashes into short ones (the short form is now the canonical format)
 * converts
 * $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu
 * into
 * $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$
 *
 * This code was moved from get_salt().
 */
/*
 * Convert a long-form hash to the short (canonical) form: the 8-byte
 * challenge is derived as the first 8 bytes of
 * SHA1(peer challenge . authenticator challenge . username).
 * Returns a pointer to a static buffer (valid until the next call).
 */
static char *chap_long_to_short(char *ciphertext) {
	static char Buf[CHAP_TOTAL_LENGTH+1]; // larger than we need, but not a big deal
	static SHA_CTX ctx;
	unsigned char tmp[16];
	unsigned char digest[20];
	char *pos = NULL;
	int i;

	SHA1_Init(&ctx);
	/* Peer Challenge */
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Authenticator Challenge */
	pos = ciphertext + FORMAT_TAG_LEN; /* Skip $MSCHAPv2$ */
	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Username - Only the user name (as presented by the peer and
	   excluding any prepended domain name) is used as input to SHAUpdate()
	 */
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */
	SHA1_Update(&ctx, pos, strlen(pos));
	SHA1_Final(digest, &ctx);

	// Ok, now we re-make our ciphertext buffer, into the short canonical form.
	strcpy(Buf, FORMAT_TAG);
	pos = Buf + FORMAT_TAG_LEN;
	/* Emit the derived challenge as 16 lower-case hex digits. */
	for (i = 0; i < SALT_SIZE; i++) {
		//binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
		pos[(i<<1)] = itoa16[digest[i]>>4];
		pos[(i<<1)+1] = itoa16[digest[i]&0xF];
	}
	/* ciphertext[42] is the '$' before the response (10 tag + 32 hex chars);
	   copy "$<48-hex response>$" and append a second '$' plus NUL, giving the
	   canonical "...$$" tail. */
	memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2);
	pos[16+CIPHERTEXT_LENGTH+2] = '$';
	pos[16+CIPHERTEXT_LENGTH+3] = 0;
	//printf ("short=%s original=%s\n", Buf, ciphertext);
	return Buf;
}
/*
 * Validate a ciphertext (short or long form).  Beyond the syntactic checks,
 * verify the third 8-byte DES block of the response is actually producible:
 * its DES key has only the first two bytes variable (the remaining five are
 * zero padding -- per the NTLMv1/MSCHAPv2 construction these would be NT-hash
 * bytes 14-15), so all 0x10000 possibilities can be enumerated.  This cheaply
 * rejects corrupt or forged hashes.
 */
static int chap_valid(char *ciphertext, struct fmt_main *pFmt)
{
	char *cp = NULL;

	if (chap_valid_short(ciphertext))
		cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
	if (cp) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = chap_get_salt(ciphertext);
		int i, j;

		/* Skip the first two DES blocks (2 x 8 bytes x 2 hex chars) so cp
		   points at the 16 hex chars of the third block. */
		cp += 2 * 8 * 2;
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(cp[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(cp[i * 2 + 1])];
		}
		/* Fast path: try the byte pair that validated the previous hash. */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;
		/* Exhaustive search over the two meaningful key bytes; cache the
		   pair that matches for the next call. */
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected MSCHAPv2 hash with "
			        "invalid 3rd block\n");
#endif
	}
	return 0;
}
/*
 * Build a long-form "$MSCHAPv2$auth$resp$peer$user" line from login fields.
 * Returns a persistent copy when the result validates, otherwise the
 * original field[1] untouched.
 */
static char *chap_prepare_long(char *split_fields[10])
{
	char *user, *joined;

	/* DOMAIN\USERNAME -or- USERNAME -- ignore DOMAIN */
	user = strstr(split_fields[0], "\\");
	if (user == NULL)
		user = split_fields[0];
	else
		user++;
	/* Exact-fit buffer: tag + 3 fields + 3 '$' separators + user + NUL */
	joined = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+
	                   1+strlen(split_fields[5])+1+strlen(user)+1);
	sprintf(joined, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3],
	        split_fields[4], split_fields[5], user);
	if (chap_valid_long(joined)) {
		char *keep = str_alloc_copy(joined);
		MEM_FREE(joined);
		return keep;
	}
	MEM_FREE(joined);
	return split_fields[1];
}
/*
 * Build a short-form "$MSCHAPv2$challenge$response$$" line from login
 * fields.  Returns a persistent copy when the result validates, otherwise
 * the original field[1] untouched.
 */
static char *chap_prepare_short(char *split_fields[10])
{
	char *joined;

	/* Exact-fit buffer: tag + 2 fields + "$", "$$" and NUL */
	joined = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+
	                   1+1+1);
	sprintf(joined, "%s%s$%s$$", FORMAT_TAG, split_fields[3], split_fields[4]);
	if (chap_valid_short(joined)) {
		char *keep = str_alloc_copy(joined);
		MEM_FREE(joined);
		return keep;
	}
	MEM_FREE(joined);
	return split_fields[1];
}
/*
 * Canonicalize a loader line into our hash format.  Handles three
 * inputs: an already-tagged $MSCHAPv2$ string (trimming trailing junk
 * fields from a short form), a 5-field long form (challenge/response/
 * peer-challenge/user) and a 3-field short form.  Long forms are folded
 * to the short representation.  Falls back to split_fields[1] untouched.
 */
static char *chap_prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	char *ret;

	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) {
		// check for a short format that has any extra trash fields, and if so remove them.
		char *cp1, *cp2, *cp3;
		cp1 = split_fields[1];
		cp1 += FORMAT_TAG_LEN;
		cp2 = strchr(cp1, '$');
		ret = NULL;
		if (cp2 && cp2-cp1 == CHAP_CHALLENGE_LENGTH/4) {
			++cp2;
			cp3 = strchr(cp2, '$');
			/* NOTE(review): with '||' this condition is true for nearly
			   any tail (when strlen(cp3) <= 2, cp3[2] is rarely '$');
			   '&&' may have been intended -- confirm against upstream. */
			if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
				/* Keep everything up to the response, terminate with "$$". */
				ret = str_alloc_copy(split_fields[1]);
				ret[(cp3-split_fields[1]) + 1] = '$';
				ret[(cp3-split_fields[1]) + 2] = 0;
				//printf ("Here is the cut item: %s\n", ret);
			}
		}
	}
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         split_fields[5] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/2 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
	         strlen(split_fields[5]) == CHAP_CHALLENGE_LENGTH/2)
		ret = chap_prepare_long(split_fields);
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/4 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
		ret = chap_prepare_short(split_fields);
	else
		ret = NULL;
	/* Normalize any long form (from either path) to the short form. */
	if (ret && chap_valid_long(ret))
		ret = chap_long_to_short(ret);
	else if (chap_valid_long(split_fields[1]))
		ret = chap_long_to_short(split_fields[1]);
	return ret ? ret : split_fields[1];
}
/*
 * Normalize a valid ciphertext into canonical form: the two hex fields
 * are lower-cased (the "$MSCHAPv2" tag and any trailing user name keep
 * their case), and a long form is folded to the short representation.
 * Returns a pointer to a static buffer.
 */
static char *chap_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CHAP_TOTAL_LENGTH + 1];
	int pos, dollars = 0;

	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, strlen(ciphertext));

	/* Lower-case until the third '$' has been seen. */
	for (pos = FORMAT_TAG_LEN; pos < CHAP_TOTAL_LENGTH + 1 && dollars < 3; pos++) {
		if (out[pos] == '$')
			dollars++;
		else if (out[pos] >= 'A' && out[pos] <= 'Z')
			out[pos] |= 0x20;
	}

	if (chap_valid_long(out))
		return chap_long_to_short(out);
	return out;
}
/*
 * Extract the effective 8-byte DES challenge from a $NETNTLM$ string.
 * Two layouts exist: a plain 16-hex-char server challenge (the '$'
 * lands at offset 25), or Extended Session Security where 32 hex chars
 * hold server+client challenges and the effective challenge is the
 * first SALT_SIZE bytes of MD5(server||client).
 * Returns static storage, overwritten on each call.
 */
static void *ntlm_get_salt(char *ciphertext)
{
	static uchar *binary_salt;
	int i;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	if (ciphertext[25] == '$') {
		// Server challenge
		ciphertext += FORMAT_TAGN_LEN;
		for (i = 0; i < SALT_SIZE; ++i)
			binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
				atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	} else {
		uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
		MD5_CTX ctx;

		ciphertext += FORMAT_TAGN_LEN;
		// Extended Session Security,
		// Concatenate Server & Client challenges
		for (i = 0;i < 2 * SALT_SIZE; ++i)
			es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
				atoi16[ARCH_INDEX(ciphertext[i*2+1])];
		// MD5 the concatenated challenges, result is our key
		MD5_Init(&ctx);
		MD5_Update(&ctx, es_salt, 16);
		MD5_Final((void*)k1, &ctx);
		memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
	}
	return (void*)binary_salt;
}
/*
 * Full validity check for a $NETNTLM$ ciphertext: tag, total length
 * (74 = plain challenge, 90 = ESS), '$' separator position, hex-only
 * fields, and - like chap_valid() - consistency of the response's 3rd
 * DES block with some 2-byte key prefix (2^16 brute force, last hit
 * cached in valid_i/valid_j).  Returns 1 if valid, 0 otherwise.
 */
static int ntlm_valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, FORMAT_TAGN, FORMAT_TAGN_LEN)!=0) return 0;
	if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
	/* Reject only when NEITHER candidate separator position holds '$'. */
	if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;
	/* Challenge field must be pure hex, terminated by '$'. */
	for (pos = &ciphertext[FORMAT_TAGN_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (*pos != '$') return 0;
	/* Response field must be pure hex up to the terminator. */
	for (pos++; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
	              (pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = ntlm_get_salt(ciphertext);
		int i, j;

		/* Jump past the last '$' and the first two DES blocks
		   (2 * 16 hex chars) to the 3rd block. */
		ciphertext = strrchr(ciphertext, '$') + 1 + 2 * 8 * 2;
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
		}
		/* Cached pair first, then exhaustive 2-byte search. */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected NetNTLM hash with invalid "
			        "3rd block\n");
#endif
	}
	return 0;
}
/*
 * Canonicalize loader fields into "$NETNTLM$challenge[clientChal]$response".
 * Rejects lines that are already tagged, incomplete, look like NTLMv2,
 * or are the well-known anonymous login.  For ESS, the 8-byte client
 * challenge is carried in the otherwise zero-padded "LM" field and is
 * appended to the server challenge.  Returns split_fields[1] unchanged
 * on any rejection.
 */
static char *ntlm_prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;
	char clientChal[17];

	if (!strncmp(split_fields[1], FORMAT_TAGN, FORMAT_TAGN_LEN))
		return split_fields[1];
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];
	if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
		return split_fields[1];

	// Ignore anonymous login (Username "", Password "")
	if (split_fields[0] && strlen(split_fields[0]) == 0 &&
	    !strncasecmp(split_fields[3], "edb7398877d716be", 16) &&
	    !strncasecmp(split_fields[4], "42aeb71fbb6dc18499016b08"
	                 "b178ba65430ad39ae2498629", 48))
		return split_fields[1];

	// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
	if (strlen(split_fields[3]) == 48 &&
	    !strncmp(&split_fields[3][16], "00000000000000000000000000000000",
	             32))
	{
		memcpy(clientChal, split_fields[3],16);
		clientChal[16] = 0;
	}
	else
		clientChal[0] = 0;

	cp = mem_alloc(FORMAT_TAGN_LEN+strlen(split_fields[5])+strlen(clientChal)+1+
	               strlen(split_fields[4])+1);
	sprintf(cp, "%s%s%s$%s", FORMAT_TAGN, split_fields[5], clientChal,
	        split_fields[4]);

	/* Only accept the rebuilt line if it passes the full check. */
	if (ntlm_valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}
/*
 * Canonicalize a valid $NETNTLM$ ciphertext: everything after the tag
 * is lower-cased.  Returns a pointer to a static buffer.
 */
static char *ntlm_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[NTLM_TOTAL_LENGTH + 1];

	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, strlen(ciphertext));
	strlwr(&out[FORMAT_TAGN_LEN]); /* Exclude: $NETNTLM$ */
	return out;
}
/* Install the active 8-byte challenge used by crypt_all()/cmp_*(). */
static void set_salt(void *salt)
{
	challenge = salt;
}
// ISO-8859-1 to UCS-2, directly into vector key buffer
/*
 * Store a key, widening each 8-bit char to a UTF-16 unit.  In the SIMD
 * path two units are packed per 32-bit word of the interleaved buffer,
 * an 0x80 terminator byte is appended (MD4 padding), the remainder of
 * the buffer is zeroed, and the MD4 bit-length (len UTF-16 units * 16
 * bits = len << 4) is stored in lane word 14.
 */
static void set_key_ansi(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const uchar *key = (uchar*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	unsigned int len, temp2;

	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			/* Two more chars: pack low unit | high unit. */
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		}
		else
		{
			/* Odd tail: last char plus the 0x80 pad marker. */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	/* Even length: pad marker goes in its own word. */
	*keybuf_word = 0x80;

key_cleaning:
	/* Zero the rest of the previous key in this lane. */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
	                           (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
	/* Scalar LE: plain widening copy, length recorded in bytes. */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = saved_key[index];
	while (*s)
		*d++ = *s++;
	*d = 0;
	saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#else
	/* Scalar BE: write the char into the first byte of each unit. */
	UTF8 *s = (UTF8*)_key;
	UTF8 *d = (UTF8*)saved_key[index];
	while (*s) {
		*d++ = *s++;
		++d;
	}
	*d = 0;
	saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#endif
#endif
	keys_prepared = 0;
}
// Legacy codepage to UCS-2, directly into vector key buffer
/*
 * Same buffer layout as set_key_ansi(), but each input byte is first
 * mapped through the CP_to_Unicode[] table for the configured legacy
 * codepage.  Scalar fallback uses enc_to_utf16().
 */
static void set_key_CP(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const uchar *key = (uchar*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	unsigned int len, temp2;

	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			temp = CP_to_Unicode[temp];
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		} else {
			/* Odd tail: last unit plus 0x80 pad marker. */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning_enc;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80;

key_cleaning_enc:
	/* Zero out remains of the previous key in this lane. */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
	                           (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	/* saved_len is in bytes (UTF-16 units << 1); negative return from
	   enc_to_utf16 signals truncation - recompute actual length. */
	saved_len[index] = enc_to_utf16(saved_key[index],
	                                PLAINTEXT_LENGTH + 1,
	                                (uchar*)_key,
	                                strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}
// UTF-8 to UCS-2, directly into vector key buffer
/*
 * Decode a UTF-8 key into UTF-16 units, packed two per 32-bit word of
 * the interleaved SIMD buffer (chl = even position, chh = odd).  Adds
 * the 0x80 MD4 pad marker, zeroes the remainder of the lane, and
 * stores the bit length (len << 4) in lane word 14.  With
 * NT_FULL_UNICODE, code points above the BMP become surrogate pairs.
 * Scalar fallback uses utf8_to_utf16().
 */
static void set_key_utf8(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			/* Lead byte of a multi-byte sequence: fold in the
			   continuation bytes (cases fall through by design). */
			unsigned int extraBytesToRead;
			extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
#if NT_FULL_UNICODE
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
#endif
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
#if NT_FULL_UNICODE
		if (chl > UNI_MAX_BMP) {
			/* Needs a surrogate pair; if the second unit won't fit,
			   terminate instead. */
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START); /* fixed stray ';;' */
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else
#endif
		if (*source && len < PLAINTEXT_LENGTH) {
			/* Decode the second code unit for this word. */
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
#if NT_FULL_UNICODE
				case 3:
					++source;
					if (*source) {
						/* BUG FIX: this case previously updated 'chl',
						   corrupting both code units for 4-byte UTF-8
						   sequences in the odd position; it must
						   accumulate into 'chh' like cases 2 and 1. */
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
#endif
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* Odd tail: pad marker shares the word with chl. */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += SIMD_COEF_32;
	}
	/* Even length (or empty key): pad marker in its own word. */
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += SIMD_COEF_32;
	}

bailout:
	/* Zero out remains of the previous key in this lane. */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
	                           (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	saved_len[index] = utf8_to_utf16(saved_key[index],
	                                 PLAINTEXT_LENGTH + 1,
	                                 (uchar*)_key,
	                                 strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;
}
/*
 * One-time format setup: scale key counts for OpenMP (scalar build
 * only), pick the set_key variant matching the target encoding, and
 * lazily allocate the key/hash buffers plus the 64 Kbit partial-hash
 * bitmap used by cmp_all().
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP) && !defined(SIMD_COEF_32)
	int omp_t = omp_get_max_threads();

	/* min is scaled by thread count only; max also by OMP_SCALE. */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	my = self;
	if (options.target_enc == UTF_8) {
		self->methods.set_key = set_key_utf8;
		/* UTF-8 may use up to 3 bytes per UTF-16 unit. */
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
	} else {
		if (options.target_enc != ASCII &&
		    options.target_enc != ISO_8859_1)
			self->methods.set_key = set_key_CP;
	}
	if (!saved_key) {
#if SIMD_COEF_32
		saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
		                             sizeof(*saved_key) * 64, MEM_ALIGN_SIMD);
		nthash = mem_calloc_align(self->params.max_keys_per_crypt,
		                          sizeof(*nthash) * 16, MEM_ALIGN_SIMD);
#else
		saved_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_key));
		nthash = mem_calloc(self->params.max_keys_per_crypt,
		                    sizeof(*nthash) * 16);
		saved_len = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_len));
#endif
		/* 16-bit partial MD4 value per candidate. */
		crypt_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(unsigned short));
	}
	if (bitmap == NULL)
		bitmap = mem_calloc_align(1, 0x10000 / 8, MEM_ALIGN_CACHE);
	else
		memset(bitmap, 0, 0x10000 / 8);
	use_bitmap = 0; /* we did not use bitmap yet */
	cmps_per_crypt = 2; /* try bitmap */
}
/* Release every buffer allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
	MEM_FREE(nthash);
	MEM_FREE(crypt_key);
	MEM_FREE(bitmap);
}
// Get the key back from the key buffer, from UCS-2
/*
 * Reconstruct the plaintext for one lane.  In the SIMD path, each
 * interleaved 32-bit word is split into two UTF-16 units; scanning
 * stops at the 0x80 pad marker (which may land in either half of a
 * word).  Result is converted back to the target encoding.
 */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	static UTF16 key[PLAINTEXT_LENGTH + 1];
	unsigned int md4_size=0;
	unsigned int i=0;

	for (; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++)
	{
		key[md4_size] = keybuf_word[i];
		key[md4_size+1] = keybuf_word[i] >> 16;
		/* Pad marker in the low half, high half empty: even length. */
		if (key[md4_size] == 0x80 && key[md4_size+1] == 0) {
			key[md4_size] = 0;
			break;
		}
		++md4_size;
		/* Pad marker in the high half: odd length (also handles the
		   full-length case). */
		if (key[md4_size] == 0x80 &&
		    ((keybuf_word[i+SIMD_COEF_32]&0xFFFF) == 0 ||
		     md4_size == PLAINTEXT_LENGTH))
		{
			key[md4_size] = 0;
			break;
		}
	}
	return (char*)utf16_to_enc(key);
#else
	return (char*)utf16_to_enc(saved_key[index]);
#endif
}
/*
 * Build the stored binary for a ciphertext: bytes [2..] hold the
 * 24-byte response decoded from hex, and bytes [0..1] hold the two
 * brute-forced key bytes of the 3rd DES block (the partial NT hash
 * tail), found the same way as in valid().  Warns once if loading many
 * hashes, since this per-hash 2^16 search is slow.  Static storage.
 */
static void *get_binary(char *ciphertext)
{
	static uchar *binary;
	static int warned = 0, loaded = 0;
	DES_cblock *challenge = my->methods.salt(ciphertext);
	int i, j;

	if (!binary) binary = mem_alloc_tiny(FULL_BINARY_SIZE, BINARY_ALIGN);

	if (john_main_process)
	if (!warned && !ldr_in_pot && !bench_running && ++loaded > 100) {
		warned = 1;
		fprintf(stderr, "%s: Note: slow loading. For short runs, try "
		        "--format=%s-naive\ninstead. That version loads "
		        "faster but runs slower.\n", my->params.label,
		        my->params.label);
	}

	/* Locate the 48-hex-char response for any of the three layouts. */
	if (chap_valid_short(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
	else /* ntlmv1 */
		ciphertext = strrchr(ciphertext, '$') + 1;

	for (i = 0; i < FULL_BINARY_SIZE - 2; i++) {
		binary[2 + i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
		binary[2 + i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}

	{
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;

		/* Recover the 2 key bytes of the 3rd block: cached pair
		   first, then exhaustive search (valid() guarantees a hit). */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
			binary[0] = valid_i; binary[1] = valid_j;
			goto out;
		}
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
				binary[0] = i; binary[1] = j;
				goto out;
			}
		}
		fprintf(stderr, "Bug: %s hash with invalid 3rd block, should "
		        "have been rejected in valid()\n", my->params.label);
		binary[0] = binary[1] = 0x55;
	}

out:
	return binary;
}
/*
 * Lazily compute MD4 over all prepared UTF-16 keys.  Only 16 bits of
 * each hash (bits 16..31 of output word 3) are kept in crypt_key[];
 * full comparison happens later in cmp_one()/cmp_exact().  A 64 Kbit
 * bitmap over those 16-bit values is maintained when cmp_all() is
 * being called often enough (cmps_per_crypt heuristic) to pay off.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;

	if (!keys_prepared) {
		int i = 0;

		if (use_bitmap) {
#if MAX_KEYS_PER_CRYPT >= 200
//#warning Notice: Using memset
			memset(bitmap, 0, 0x10000 / 8);
#else
//#warning Notice: Not using memset
			/* Few keys: cheaper to clear only the words set last time. */
#ifdef SIMD_COEF_32
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++)
#else
			for (i = 0; i < count; i++)
#endif
			{
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] = 0;
			}
#endif
		}
		/* Enable the bitmap only if the last crypt saw 2+ cmp_all calls. */
		use_bitmap = cmps_per_crypt >= 2;
		cmps_per_crypt = 0;

#ifdef SIMD_COEF_32
#if (BLOCK_LOOPS > 1)
#if defined(_OPENMP) && defined(SSE_OMP)
#pragma omp parallel for
#endif
		for (i = 0; i < BLOCK_LOOPS; i++)
			SIMDmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN);
#else
		SIMDmd4body(saved_key, (unsigned int*)nthash, NULL, SSEi_MIXED_IN);
#endif
		if (use_bitmap)
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				unsigned int value;

				value = *(uint32_t*)
					&nthash[GETOUTPOS(12, i)] >> 16;
				crypt_key[i] = value;
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		else
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				crypt_key[i] = *(uint32_t*)
					&nthash[GETOUTPOS(12, i)] >> 16;
			}
#else
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++)
#endif
		{
			MD4_CTX ctx;

			MD4_Init( &ctx );
			MD4_Update(&ctx, saved_key[i], saved_len[i]);
			MD4_Final((uchar*)&nthash[i * 16], &ctx);
			/* Keep 16-bit partial hash (shorts 7 = bytes 14..15). */
			crypt_key[i] = ((unsigned short*)&nthash[i * 16])[7];
			if (use_bitmap) {
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		}
#endif
		keys_prepared = 1;
	}
	return count;
}
/*
 * Per-candidate check: after the cheap 16-bit partial-hash filter,
 * DES-encrypt the challenge using the first 8 bytes of this
 * candidate's MD4 hash as key material and compare against the first
 * 8-byte block of the stored response (binary+2, past the 2 recovered
 * key bytes).
 */
static int cmp_one(void *binary, int index)
{
	if (crypt_key[index] == *(unsigned short*)binary) {
		DES_key_schedule ks;
		DES_cblock computed_binary;
		unsigned int key[2];

#ifdef SIMD_COEF_32
		int i;

		/* Gather the first 8 hash bytes from the interleaved output. */
		for (i = 0; i < 2; i++)
			key[i] = *(uint32_t*)
				&nthash[GETOUTPOS(4 * i, index)];
#else
		memcpy(key, &nthash[index * 16], 8);
#endif
		setup_des_key((unsigned char*)key, &ks);
		DES_ecb_encrypt((DES_cblock*)challenge, &computed_binary,
		                &ks, DES_ENCRYPT);
		return !memcmp(((char*)binary) + 2, computed_binary, 8);
	}
	return 0;
}
/*
 * Batch filter over all candidates using the 16-bit partial hash:
 * first the bitmap (if enabled), then a pairwise scan for a matching
 * value, then a thorough pass calling cmp_one().
 */
static int cmp_all(void *binary, int count)
{
	unsigned int value = *(unsigned short*)binary;
	int index;

	cmps_per_crypt++;

	if (use_bitmap && !(bitmap[value >> 5] & (1U << (value & 0x1f))))
		goto out;

#ifdef SIMD_COEF_32
	/* Let's give the optimizer a hint! */
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index += 2)
#else
	/* NOTE(review): the step of 2 reads crypt_key[index + 1]; this
	   assumes count is even -- presumably guaranteed by the format's
	   key-count parameters, but confirm. */
	for (index = 0; index < count; index += 2)
#endif
	{
		unsigned int a = crypt_key[index];
		unsigned int b = crypt_key[index + 1];

#if 0
		if (((a | b) & value) != value)
			continue;
#endif
		if (a == value || b == value)
			goto thorough;
	}
	goto out;

thorough:
#ifdef SIMD_COEF_32
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index++)
#else
	for (; index < count; index++)
#endif
	{
		if (crypt_key[index] == value && cmp_one(binary, index))
			return 1;
	}

out:
	return 0;
}
/*
 * Full verification: rebuild the complete 24-byte response from this
 * candidate's 16-byte MD4 hash (zero-padded to 21 bytes, split into
 * three 7-byte DES keys, each encrypting the challenge) and compare it
 * hex digit pair by pair against the normalized source string.
 */
static int cmp_exact(char *source, int index)
{
	DES_key_schedule ks;
	uchar binary[24];
	unsigned char key[21];
	char *cp;
	int i;

#ifdef SIMD_COEF_32
	/* De-interleave the 16 hash bytes from the SIMD output buffer. */
	for (i = 0; i < 4; i++)
		((uint32_t*)key)[i] = *(uint32_t*)
			&nthash[GETOUTPOS(4 * i, index)];
#else
	memcpy(key, &nthash[index * 16], 16);
#endif
	/* Hash is NULL padded to 21-bytes */
	memset(&key[16], 0, 5);

	/* Split into three 7-byte segments for use as DES keys
	   Use each key to DES encrypt challenge
	   Concatenate output to for 24-byte NTLM response */
	setup_des_key(key, &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary,
	                &ks, DES_ENCRYPT);
	setup_des_key(&key[7], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8],
	                &ks, DES_ENCRYPT);
	setup_des_key(&key[14], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16],
	                &ks, DES_ENCRYPT);

	// With the normalized source we simply need to skip the
	// $MSCHAPv2$hhhhhhhhhhhhhhhh$ string to get 'real' binary data.
	// $NETNTLM$c75c20bff9baa71f4765f360625700b0$
	/* Offset 11 lands inside the challenge field for both tags; the
	   next '$' then marks the start of the hex response. */
	cp = &source[11];
	cp = strchr(cp, '$');
	++cp;
	for (i = 0; i < 24; ++i) {
		unsigned char c = (atoi16[ARCH_INDEX(*cp)] << 4) +
			(atoi16[ARCH_INDEX(*(cp+1))] );
		if (c != binary[i])
			return 0;
		cp += 2;
	}
	return 1;
}
/* Standard John hash accessors: the salt hashes on the first 32 bits of
   the challenge; binary_hash_* read the 16-bit partial value stored at
   the front of the binary, and get_hash_* read the matching cached
   16-bit partial MD4 value.  Only 4 levels since only 16 bits exist. */
static int salt_hash(void *salt) { return *(uint32_t*)salt & (SALT_HASH_SIZE - 1); }
static int binary_hash_0(void *binary) { return *(unsigned short*)binary & PH_MASK_0; }
static int binary_hash_1(void *binary) { return *(unsigned short*)binary & PH_MASK_1; }
static int binary_hash_2(void *binary) { return *(unsigned short*)binary & PH_MASK_2; }
static int binary_hash_3(void *binary) { return *(unsigned short*)binary & PH_MASK_3; }
static int get_hash_0(int index) { return crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index] & PH_MASK_3; }
/*
 * Format descriptor for the fast MSCHAPv2 implementation: parameter
 * block (sizes, flags, self-tests) plus the method table wiring the
 * chap_* front-end helpers to the shared MD4/DES back-end above.
 */
struct fmt_main fmt_MSCHAPv2_new = {
	{
		CHAP_FORMAT_LABEL,
		CHAP_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* OpenMP is supported except in SIMD builds without SSE_OMP. */
#if !defined(SIMD_COEF_32) || (defined(SIMD_COEF_32) && defined(SSE_OMP))
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAG },
		chap_tests
	}, {
		init,
		done,
		fmt_default_reset,
		chap_prepare,
		chap_valid,
		chap_split,
		get_binary,
		chap_get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key_ansi,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/*
 * Format descriptor for the fast NetNTLMv1 implementation; shares the
 * MD4/DES back-end with the MSCHAPv2 format and differs only in the
 * ntlm_* front-end helpers and self-tests.
 */
struct fmt_main fmt_NETNTLM_new = {
	{
		NTLM_FORMAT_LABEL,
		NTLM_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* NOTE(review): this condition tests SIMD_PARA_MD4 where the
		   MSCHAPv2 descriptor tests SIMD_COEF_32 -- confirm which is
		   intended; they may differ for some builds. */
#if !defined(SIMD_COEF_32) || (defined(SIMD_PARA_MD4) && defined(SSE_OMP))
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAGN },
		ntlm_tests
	}, {
		init,
		done,
		fmt_default_reset,
		ntlm_prepare,
		ntlm_valid,
		ntlm_split,
		get_binary,
		ntlm_get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key_ansi,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
omp_ex_20.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define N 1024
/* Fill array with pseudo-random values in the range [10, 49]. */
void initArray(unsigned int *array, unsigned int size)
{
	unsigned int idx = 0;
	while (idx < size) {
		array[idx] = rand() % 40 + 10;
		idx++;
	}
}
/* Print the elements space-separated on one line, then a newline. */
void printArray(unsigned int *array, unsigned int size)
{
	unsigned int idx = 0;
	while (idx < size)
		printf("%i ", array[idx++]);
	printf("\n");
}
/* Element-wise sum C = A + B; iterations are independent, so the loop
 * is parallelized across OpenMP threads when enabled. */
void addArrays(unsigned int *A, unsigned int *B, unsigned int *C, unsigned int size)
{
#pragma omp parallel for
	for (unsigned int idx = 0; idx < size; idx++)
		C[idx] = A[idx] + B[idx];
}
/* Demo driver: seed deterministically, build two random vectors, add
 * them in parallel and print the first 16 elements of each. */
int main()
{
	unsigned int vecA[N], vecB[N], vecC[N];

	srand(0); /* fixed seed for a reproducible run */
	initArray(vecA, N);
	initArray(vecB, N);
	addArrays(vecA, vecB, vecC, N);

	printf("C = A + B\n");
	printf("A = ");
	printArray(vecA, 16);
	printf("B = ");
	printArray(vecB, 16);
	printf("C = ");
	printArray(vecC, 16);
	return 0;
}
|
aux_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#include "hypre_hopscotch_hash.h"
/*---------------------------------------------------------------------------
* Auxilary routines for the long range interpolation methods.
* Implemented: "standard", "extended", "multipass", "FF"
*--------------------------------------------------------------------------*/
/* AHB 11/06: Modification of the above original - takes two
communication packages and inserts nodes to position expected for
OUT_marker
offd nodes from comm_pkg take up first chunk of CF_marker_offd, offd
nodes from extend_comm_pkg take up the second chunk 0f CF_marker_offd. */
/*
 * Exchange IN_marker values for off-processor nodes using two comm
 * packages: values received via comm_pkg fill the first chunk of
 * OUT_marker, values received via extend_comm_pkg fill the chunk that
 * starts right after it (offset = total receives of comm_pkg).  One
 * send buffer, sized for the larger of the two send maps, is reused
 * for both exchanges.
 */
HYPRE_Int hypre_alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                                     hypre_ParCSRCommPkg *extend_comm_pkg,
                                     HYPRE_Int *IN_marker,
                                     HYPRE_Int full_off_procNodes,
                                     HYPRE_Int *OUT_marker)
{
	hypre_ParCSRCommHandle *comm_handle;
	HYPRE_Int i, index, shift;

	HYPRE_Int num_sends, num_recvs;

	HYPRE_Int *recv_vec_starts;

	HYPRE_Int e_num_sends;

	HYPRE_Int *int_buf_data;
	HYPRE_Int *e_out_marker;

	num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
	num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
	recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

	e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

	/* Buffer must hold the larger of the two send maps. */
	index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
	                  hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

	int_buf_data = hypre_CTAlloc(HYPRE_Int, index);

	/* orig commpkg data*/
	index = 0;

	HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
	HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
	for (i = begin; i < end; ++i) {
		int_buf_data[i - begin] =
			IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
	}

	comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
	                                            OUT_marker);

	hypre_ParCSRCommHandleDestroy(comm_handle);
	comm_handle = NULL;

	/* now do the extend commpkg */

	/* first we need to shift our position in the OUT_marker */
	shift = recv_vec_starts[num_recvs];
	e_out_marker = OUT_marker + shift;

	/* NOTE(review): 'index' is re-zeroed but not used below -- leftover. */
	index = 0;

	begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
	end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
	for (i = begin; i < end; ++i) {
		int_buf_data[i - begin] =
			IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
	}

	comm_handle = hypre_ParCSRCommHandleCreate( 11, extend_comm_pkg, int_buf_data,
	                                            e_out_marker);

	hypre_ParCSRCommHandleDestroy(comm_handle);
	comm_handle = NULL;

	hypre_TFree(int_buf_data);

	return hypre_error_flag;
}
/* AHB 11/06 : alternate to the extend function below - creates a
* second comm pkg based on found - this makes it easier to use the
* global partition*/
/*
 * Create a second communication package for the 'newoff' extra
 * off-processor columns listed in 'found' (used instead of A's own
 * col_map_offd / num_cols_offd).  The send/recv maps are built either
 * via the assumed-partition path (HYPRE_NO_GLOBAL_PARTITION) or the
 * global col_starts path, then assembled into *extend_comm_pkg.
 */
HYPRE_Int
hypre_ParCSRFindExtendCommPkg(hypre_ParCSRMatrix *A, HYPRE_Int newoff, HYPRE_Int *found,
                              hypre_ParCSRCommPkg **extend_comm_pkg)
{
	HYPRE_Int num_sends;
	HYPRE_Int *send_procs;
	HYPRE_Int *send_map_starts;
	HYPRE_Int *send_map_elmts;

	HYPRE_Int num_recvs;
	HYPRE_Int *recv_procs;
	HYPRE_Int *recv_vec_starts;

	hypre_ParCSRCommPkg *new_comm_pkg;

	MPI_Comm comm = hypre_ParCSRMatrixComm(A);

	HYPRE_Int first_col_diag = hypre_ParCSRMatrixFirstColDiag(A);

	/* use found instead of col_map_offd in A, and newoff instead
	   of num_cols_offd*/

#ifdef HYPRE_NO_GLOBAL_PARTITION
	HYPRE_Int row_start=0, row_end=0, col_start = 0, col_end = 0;
	HYPRE_Int global_num_cols;
	hypre_IJAssumedPart *apart;

	hypre_ParCSRMatrixGetLocalRange( A,
	                                 &row_start, &row_end ,
	                                 &col_start, &col_end );

	global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);

	/* Create the assumed partition */
	if (hypre_ParCSRMatrixAssumedPartition(A) == NULL)
	{
		hypre_ParCSRMatrixCreateAssumedPartition(A);
	}

	apart = hypre_ParCSRMatrixAssumedPartition(A);

	hypre_NewCommPkgCreate_core( comm, found, first_col_diag,
	                             col_start, col_end,
	                             newoff, global_num_cols,
	                             &num_recvs, &recv_procs, &recv_vec_starts,
	                             &num_sends, &send_procs, &send_map_starts,
	                             &send_map_elmts, apart);
#else
	HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
	HYPRE_Int num_cols_diag = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A));

	hypre_MatvecCommPkgCreate_core
		(
		 comm, found, first_col_diag, col_starts,
		 num_cols_diag, newoff,
		 first_col_diag, found,
		 1,
		 &num_recvs, &recv_procs, &recv_vec_starts,
		 &num_sends, &send_procs, &send_map_starts,
		 &send_map_elmts
		);
#endif

	/* Assemble the pieces into the output package. */
	new_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1);

	hypre_ParCSRCommPkgComm(new_comm_pkg) = comm;

	hypre_ParCSRCommPkgNumRecvs(new_comm_pkg) = num_recvs;
	hypre_ParCSRCommPkgRecvProcs(new_comm_pkg) = recv_procs;
	hypre_ParCSRCommPkgRecvVecStarts(new_comm_pkg) = recv_vec_starts;
	hypre_ParCSRCommPkgNumSends(new_comm_pkg) = num_sends;
	hypre_ParCSRCommPkgSendProcs(new_comm_pkg) = send_procs;
	hypre_ParCSRCommPkgSendMapStarts(new_comm_pkg) = send_map_starts;
	hypre_ParCSRCommPkgSendMapElmts(new_comm_pkg) = send_map_elmts;

	*extend_comm_pkg = new_comm_pkg;

	return hypre_error_flag;
}
/* sort for non-ordered arrays */
HYPRE_Int hypre_ssort(HYPRE_Int *data, HYPRE_Int n)
{
HYPRE_Int i,si;
HYPRE_Int change = 0;
if(n > 0)
for(i = n-1; i > 0; i--){
si = hypre_index_of_minimum(data,i+1);
if(i != si)
{
hypre_swap_int(data, i, si);
change = 1;
}
}
return change;
}
/* Auxilary function for hypre_ssort */
/* Auxilary function for hypre_ssort */
/*
 * Despite the name, the comparison (data[answer] < data[i]) returns
 * the index of the LARGEST of the first n entries.  hypre_ssort()
 * relies on this to place maxima at the tail, yielding an ascending
 * sort -- do not "fix" the comparison in isolation.
 */
HYPRE_Int hypre_index_of_minimum(HYPRE_Int *data, HYPRE_Int n)
{
	HYPRE_Int answer;
	HYPRE_Int i;

	answer = 0;
	for(i = 1; i < n; i++)
		if(data[answer] < data[i])
			answer = i;

	return answer;
}
/* Exchange data[a] and data[b]. */
void hypre_swap_int(HYPRE_Int *data, HYPRE_Int a, HYPRE_Int b)
{
	HYPRE_Int tmp = data[a];

	data[a] = data[b];
	data[b] = tmp;
}
/* Initialize CF_marker_offd, CF_marker, P_marker, P_marker_offd, tmp */
void hypre_initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n, HYPRE_Int *diag_ftc, HYPRE_Int *offd_ftc,
HYPRE_Int *diag_pm, HYPRE_Int *offd_pm, HYPRE_Int *tmp_CF)
{
HYPRE_Int i;
/* Quicker initialization */
if(offd_n < diag_n)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for(i = 0; i < offd_n; i++)
{
diag_ftc[i] = -1;
offd_ftc[i] = -1;
tmp_CF[i] = -1;
if(diag_pm != NULL)
{ diag_pm[i] = -1; }
if(offd_pm != NULL)
{ offd_pm[i] = -1;}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for(i = offd_n; i < diag_n; i++)
{
diag_ftc[i] = -1;
if(diag_pm != NULL)
{ diag_pm[i] = -1; }
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for(i = 0; i < diag_n; i++)
{
diag_ftc[i] = -1;
offd_ftc[i] = -1;
tmp_CF[i] = -1;
if(diag_pm != NULL)
{ diag_pm[i] = -1;}
if(offd_pm != NULL)
{ offd_pm[i] = -1;}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for(i = diag_n; i < offd_n; i++)
{
offd_ftc[i] = -1;
tmp_CF[i] = -1;
if(offd_pm != NULL)
{ offd_pm[i] = -1;}
}
}
return;
}
/* Find nodes that are offd and are not contained in original offd
 * (neighbors of neighbors) */
/*
 * Returns the number of newly discovered off-processor columns (newoff) and,
 * through *found, a sorted array of their global column indices.  Side
 * effect: entries of A_ext_j and Sop_j that refer to off-processor columns
 * are renumbered in place to the negative encoding -(local index) - 1, where
 * the local index is either the position in col_map_offd (already-known offd
 * column) or num_cols_A_offd + position in *found (newly discovered column).
 * Only rows whose CF_marker_offd entry is negative (fine points) are scanned.
 * [col_1, col_n) is this processor's owned global column range.
 */
static HYPRE_Int hypre_new_offd_nodes(HYPRE_Int **found, HYPRE_Int num_cols_A_offd, HYPRE_Int *A_ext_i, HYPRE_Int *A_ext_j,
HYPRE_Int num_cols_S_offd, HYPRE_Int *col_map_offd, HYPRE_Int col_1,
HYPRE_Int col_n, HYPRE_Int *Sop_i, HYPRE_Int *Sop_j,
HYPRE_Int *CF_marker_offd)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
HYPRE_Int i, i1, j, kk, k1;
HYPRE_Int got_loc, loc_col;
/*HYPRE_Int min;*/
HYPRE_Int newoff = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* Concurrent (hopscotch hash) version: first build an inverse map
 * global column id -> position in col_map_offd. */
hypre_UnorderedIntMap col_map_offd_inverse;
hypre_UnorderedIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_A_offd; i++)
{
hypre_UnorderedIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);
}
/* Find nodes that will be added to the off diag list */
HYPRE_Int size_offP = A_ext_i[num_cols_A_offd];
hypre_UnorderedIntSet set;
hypre_UnorderedIntSetCreate(&set, size_offP, 16*hypre_NumThreads());
#pragma omp parallel private(i,j,i1)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
i1 = A_ext_j[j];
if(i1 < col_1 || i1 >= col_n)
{
/* i1 is off-processor: either it is already in col_map_offd
 * (renumber it now) or collect it in the set of new nodes. */
if (!hypre_UnorderedIntSetContains(&set, i1))
{
HYPRE_Int k = hypre_UnorderedIntMapGet(&col_map_offd_inverse, i1);
if (-1 == k)
{
hypre_UnorderedIntSetPut(&set, i1);
}
else
{
A_ext_j[j] = -k - 1;
}
}
}
}
for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
{
i1 = Sop_j[j];
if(i1 < col_1 || i1 >= col_n)
{
/* Off-processor Sop entries not in the new-node set are assumed
 * to be present in col_map_offd; renumber them immediately. */
if (!hypre_UnorderedIntSetContains(&set, i1))
{
Sop_j[j] = -hypre_UnorderedIntMapGet(&col_map_offd_inverse, i1) - 1;
}
}
}
} /* CF_marker_offd[i] < 0 */
} /* for each row */
} /* omp parallel */
hypre_UnorderedIntMapDestroy(&col_map_offd_inverse);
HYPRE_Int *tmp_found = hypre_UnorderedIntSetCopyToArray(&set, &newoff);
hypre_UnorderedIntSetDestroy(&set);
/* Put found in monotone increasing order */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
hypre_UnorderedIntMap tmp_found_inverse;
if (newoff > 0)
{
/* Sorts tmp_found (replacing the array pointer) and builds the inverse
 * map: new global column id -> position in the sorted array. */
hypre_sort_and_create_inverse_map(tmp_found, newoff, &tmp_found, &tmp_found_inverse);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
/* Set column indices for Sop and A_ext such that offd nodes are
 * negatively indexed */
#pragma omp parallel for private(kk,k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE
for(i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
{
/* Still positive and off-processor: must be a newly found node;
 * its local index comes after the original offd columns. */
got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1);
loc_col = got_loc + num_cols_A_offd;
Sop_j[kk] = -loc_col - 1;
}
}
for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
{
k1 = A_ext_j[kk];
if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
{
got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1);
loc_col = got_loc + num_cols_A_offd;
A_ext_j[kk] = -loc_col - 1;
}
}
}
}
if (newoff)
{
hypre_UnorderedIntMapDestroy(&tmp_found_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
/* Sequential version: collect candidates (possibly with duplicates),
 * sort, deduplicate, then renumber via binary search. */
HYPRE_Int size_offP;
HYPRE_Int *tmp_found;
HYPRE_Int min;
HYPRE_Int ifound;
size_offP = A_ext_i[num_cols_A_offd]+Sop_i[num_cols_A_offd];
tmp_found = hypre_CTAlloc(HYPRE_Int, size_offP);
/* Find nodes that will be added to the off diag list */
for (i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
i1 = A_ext_j[j];
if(i1 < col_1 || i1 >= col_n)
{
ifound = hypre_BinarySearch(col_map_offd,i1,num_cols_A_offd);
if(ifound == -1)
{
tmp_found[newoff]=i1;
newoff++;
}
else
{
A_ext_j[j] = -ifound-1;
}
}
}
for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
{
i1 = Sop_j[j];
if(i1 < col_1 || i1 >= col_n)
{
ifound = hypre_BinarySearch(col_map_offd,i1,num_cols_A_offd);
if(ifound == -1)
{
tmp_found[newoff]=i1;
newoff++;
}
else
{
Sop_j[j] = -ifound-1;
}
}
}
}
}
/* Put found in monotone increasing order */
if (newoff > 0)
{
hypre_qsort0(tmp_found,0,newoff-1);
/* In-place dedup of the sorted array; `min` counts unique entries. */
ifound = tmp_found[0];
min = 1;
for (i=1; i < newoff; i++)
{
if (tmp_found[i] > ifound)
{
ifound = tmp_found[i];
tmp_found[min++] = ifound;
}
}
newoff = min;
}
/* Set column indices for Sop and A_ext such that offd nodes are
 * negatively indexed */
for(i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
{
/* NOTE(review): if got_loc == -1 here, loc_col keeps its previous
 * value (uninitialized on the first such hit).  This relies on
 * every remaining off-processor k1 being present in tmp_found —
 * presumably guaranteed by the collection pass above; confirm. */
got_loc = hypre_BinarySearch(tmp_found,k1,newoff);
if(got_loc > -1)
loc_col = got_loc + num_cols_A_offd;
Sop_j[kk] = -loc_col - 1;
}
}
for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
{
k1 = A_ext_j[kk];
if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
{
got_loc = hypre_BinarySearch(tmp_found,k1,newoff);
loc_col = got_loc + num_cols_A_offd;
A_ext_j[kk] = -loc_col - 1;
}
}
}
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
*found = tmp_found;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
return newoff;
}
/*
 * Exchange a marker array across processor boundaries: pack the IN_marker
 * values of every element in the comm package's send map into a buffer and
 * deliver them to OUT_marker on the receiving processors.  The comm handle
 * is destroyed immediately, so the exchange completes before returning.
 */
HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg,
HYPRE_Int *IN_marker,
HYPRE_Int *OUT_marker)
{
HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
HYPRE_Int first = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int last = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
HYPRE_Int *send_buf = hypre_CTAlloc(HYPRE_Int, last);
HYPRE_Int idx;
/* Gather the marker values of all send-map elements into the buffer. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (idx = first; idx < last; ++idx)
{
send_buf[idx - first] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, idx)];
}
/* Job type 11 — presumably the integer-exchange variant of the comm
 * handle (see hypre_ParCSRCommHandleCreate). */
hypre_ParCSRCommHandle *handle =
hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf, OUT_marker);
hypre_ParCSRCommHandleDestroy(handle);
hypre_TFree(send_buf);
return hypre_error_flag;
}
/*
 * Gather everything the distance-two interpolation routines need from
 * neighboring processors: CF_marker (and, for num_functions > 1, dof_func)
 * values for all off-processor columns, the external rows of A (*A_ext) and
 * S (*Sop), and a communication package (*extend_comm_pkg) that also covers
 * the newly discovered distance-two neighbors.  *full_off_procNodes is set
 * to the total off-processor count: original offd columns plus the new
 * nodes found by hypre_new_offd_nodes.  The caller owns the returned
 * allocations (*CF_marker_offd, *dof_func_offd, *A_ext, *Sop).
 */
HYPRE_Int hypre_exchange_interp_data(
HYPRE_Int **CF_marker_offd,
HYPRE_Int **dof_func_offd,
hypre_CSRMatrix **A_ext,
HYPRE_Int *full_off_procNodes,
hypre_CSRMatrix **Sop,
hypre_ParCSRCommPkg **extend_comm_pkg,
hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int skip_fine_or_same_sign) // skip_fine_or_same_sign if we want to skip fine points in S and nnz with the same sign as diagonal in A
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
/* [col_1, col_n) is this processor's owned global column range. */
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int *found = NULL;
/*----------------------------------------------------------------------
 * Get the off processors rows for A and S, associated with columns in
 * A_offd and S_offd.
 *---------------------------------------------------------------------*/
*CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd);
hypre_exchange_marker(comm_pkg, CF_marker, *CF_marker_offd);
/* Overlapped extraction: the index/data communications are started here
 * and completed below, so the Sop extraction can overlap with A's. */
hypre_ParCSRCommHandle *comm_handle_a_idx, *comm_handle_a_data;
*A_ext = hypre_ParCSRMatrixExtractBExt_Overlap(A,A,1,&comm_handle_a_idx,&comm_handle_a_data,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,skip_fine_or_same_sign);
HYPRE_Int *A_ext_i = hypre_CSRMatrixI(*A_ext);
HYPRE_Int *A_ext_j = hypre_CSRMatrixJ(*A_ext);
HYPRE_Int A_ext_rows = hypre_CSRMatrixNumRows(*A_ext);
hypre_ParCSRCommHandle *comm_handle_s_idx;
*Sop = hypre_ParCSRMatrixExtractBExt_Overlap(S,A,0,&comm_handle_s_idx,NULL,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,0);
HYPRE_Int *Sop_i = hypre_CSRMatrixI(*Sop);
HYPRE_Int *Sop_j = hypre_CSRMatrixJ(*Sop);
HYPRE_Int Soprows = hypre_CSRMatrixNumRows(*Sop);
/* Complete the outstanding index communications and release their send
 * buffers before scanning A_ext_j / Sop_j below. */
HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_s_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_s_idx);
hypre_TFree(send_idx);
send_idx = (HYPRE_Int *)comm_handle_a_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_a_idx);
hypre_TFree(send_idx);
/* Find nodes that are neighbors of neighbors, not found in offd */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
HYPRE_Int newoff = hypre_new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
Soprows, col_map_offd, col_1, col_n,
Sop_i, Sop_j, *CF_marker_offd);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
if(newoff >= 0)
*full_off_procNodes = newoff + num_cols_A_offd;
else
{
return hypre_error_flag;
}
/* Possibly add new points and new processors to the comm_pkg, all
 * processors need new_comm_pkg */
/* AHB - create a new comm package just for extended info -
 this will work better with the assumed partition*/
hypre_ParCSRFindExtendCommPkg(A, newoff, found,
extend_comm_pkg);
/* Extend CF_marker_offd with markers for the newly found nodes; they are
 * appended after the first A_ext_rows entries (NOTE(review): this assumes
 * A_ext_rows == num_cols_A_offd — confirm). */
*CF_marker_offd = hypre_TReAlloc(*CF_marker_offd, HYPRE_Int, *full_off_procNodes);
hypre_exchange_marker(*extend_comm_pkg, CF_marker, *CF_marker_offd + A_ext_rows);
if(num_functions > 1)
{
if (*full_off_procNodes > 0)
*dof_func_offd = hypre_CTAlloc(HYPRE_Int, *full_off_procNodes);
hypre_alt_insert_new_nodes(comm_pkg, *extend_comm_pkg, dof_func,
*full_off_procNodes, *dof_func_offd);
}
hypre_TFree(found);
/* Finish the overlapped data communication for A_ext. */
HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_a_data->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_a_data);
hypre_TFree(send_data);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*
 * Compress the offd part of the interpolation matrix P: determine which
 * off-processor columns actually appear in P_offd_j (and are coarse
 * according to tmp_CF_marker_offd), build the compressed column map
 * col_map_offd_P in terms of coarse global indices (via
 * fine_to_coarse_offd), and renumber P_offd_j into the compressed, sorted
 * numbering.  On success P's col_map_offd and offd column count are
 * replaced.
 */
void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes, HYPRE_Int *tmp_CF_marker_offd, HYPRE_Int *fine_to_coarse_offd)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
HYPRE_Int i, index;
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P->diag);
HYPRE_Int P_offd_size = P->offd->i[n_fine];
HYPRE_Int *P_offd_j = P->offd->j;
HYPRE_Int *col_map_offd_P = NULL;
/* P_marker is reused for several purposes below: first as a used-column
 * flag array, later (hopscotch path) as the old->new index map. */
HYPRE_Int *P_marker = NULL;
if (full_off_procNodes)
P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < full_off_procNodes; i++)
P_marker[i] = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
 * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
 * total number of times P_marker is set */
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if(tmp_CF_marker_offd[index] >= 0)
{ P_marker[index] = 1; }
}
/* VLA sized by the runtime thread count; scratch for the prefix sum. */
HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];
HYPRE_Int num_cols_P_offd = 0;
#pragma omp parallel private(i)
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, full_off_procNodes);
/* Pass 1: each thread counts its marked columns; the prefix sum turns
 * the per-thread count into this thread's starting write offset. */
HYPRE_Int local_num_cols_P_offd = 0;
for (i = i_begin; i < i_end; i++)
{
if (P_marker[i] == 1) local_num_cols_P_offd++;
}
hypre_prefix_sum(&local_num_cols_P_offd, &num_cols_P_offd, prefix_sum_workspace);
#pragma omp master
{
if (num_cols_P_offd)
col_map_offd_P = hypre_TAlloc(HYPRE_Int, num_cols_P_offd);
}
#pragma omp barrier
/* Pass 2: write the coarse global index of each marked column at the
 * offset computed above. */
for (i = i_begin; i < i_end; i++)
{
if (P_marker[i] == 1)
{
col_map_offd_P[local_num_cols_P_offd++] = fine_to_coarse_offd[i];
}
}
}
hypre_UnorderedIntMap col_map_offd_P_inverse;
hypre_sort_and_create_inverse_map(col_map_offd_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse);
// find old idx -> new idx map
#pragma omp parallel for
for (i = 0; i < full_off_procNodes; i++)
P_marker[i] = hypre_UnorderedIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);
if (num_cols_P_offd)
{
hypre_UnorderedIntMapDestroy(&col_map_offd_P_inverse);
}
#pragma omp parallel for
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = P_marker[P_offd_j[i]];
#else /* HYPRE_CONCURRENT_HOPSCOTCH */
/* Sequential path: flag used columns, compress, renumber by binary
 * search, then translate to coarse indices and sort. */
HYPRE_Int num_cols_P_offd = 0;
HYPRE_Int j;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
if(tmp_CF_marker_offd[index] >= 0)
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
}
if (num_cols_P_offd)
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
/* First fill col_map_offd_P with the old (local) indices of the marked
 * columns so P_offd_j can be renumbered by binary search. */
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while( P_marker[index] == 0) index++;
col_map_offd_P[i] = index++;
}
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
/* Now replace the local indices with the coarse global indices. */
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) index++;
col_map_offd_P[i] = fine_to_coarse_offd[index];
index++;
}
/* Sort the col_map_offd_P and P_offd_j correctly */
for(i = 0; i < num_cols_P_offd; i++)
P_marker[i] = col_map_offd_P[i];
/* Check if sort actually changed anything */
if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
{
for(i = 0; i < P_offd_size; i++)
for(j = 0; j < num_cols_P_offd; j++)
if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
{
P_offd_j[i] = j;
j = num_cols_P_offd;
}
}
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
hypre_TFree(P_marker);
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P->offd) = num_cols_P_offd;
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * The subtrahend *y is used as scratch and is modified in place to carry
 * out the microsecond borrow/carry normalization.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
if (x->tv_usec < y->tv_usec)
{
int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * borrow;
y->tv_sec += borrow;
}
/* Conversely, move any excess microseconds over into y's seconds. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* After normalization the component-wise subtraction cannot borrow. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative iff the (adjusted) seconds of x are below those of y. */
return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the order-4 (25-point) 3D wave-equation stencil.
 * Usage: prog [Nx Ny Nz [Nt]]; 8 ghost cells (4 per side) are added to each
 * spatial dimension.  Runs TESTS timed repetitions and reports each time.
 *
 * Fixes vs. the original:
 * - Nx/Ny/Nz/Nt had no defaults and were read uninitialized when too few
 *   arguments were given (undefined behavior);
 * - roc2 was malloc'd twice, leaking the first allocation;
 * - the init loops started at 1 and never touched A[1], leaving plane 0
 *   and all of A[1] uninitialized even though the stencil reads them;
 * - A and tile_size were never freed.
 */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
/* Defaults (interior + ghost) used when no arguments are supplied. */
int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[0]/A[1] hold the two time levels of the field; roc2 holds the
 * (scaled) squared wave-speed factor.  Ragged pointer-array layout. */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);   /* single allocation (was leaked before) */
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize ALL cells (including the ghost layers) so the +/-4 stencil
// offsets never read uninitialized memory; A[1] starts at zero
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Finite-difference coefficients of the order-4 approximation. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
free(A);          /* was leaked before */
free(tile_size);  /* was leaked before */
return 0;
}
|
ast-dump-openmp-cancellation-point.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // AST fixture: the CHECK lines below reference exact line/column positions of this layout
#pragma omp parallel
{ // captured compound statement of the parallel region
#pragma omp cancellation point parallel
}
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-cancellation-point.c:3:1, line:8:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:8:1>
// CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:7:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:7:3>
// CHECK-NEXT: | `-OMPCancellationPointDirective {{.*}} <line:6:1, col:40> openmp_standalone_directive
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-cancellation-point.c:4:1) *const restrict'
|
GB_unaryop__identity_uint64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_int32
// op(A') function: GB_tran__identity_uint64_int32
// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator element-wise with a typecast:
// Cx [p] = (uint64_t) Ax [p] for all p in [0, anz).  Safe when Cx and Ax
// alias, since each iteration touches only index p.  (Auto-generated file —
// see header note — so keep edits to comments only.)
GrB_Info GB_unop__identity_uint64_int32
(
uint64_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via the GxB_NO_* controls; caller falls back to the
// generic worker
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;   // Cx [p] = (uint64_t) Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A into C while applying the identity cast.
// The actual transpose loop is supplied by the included template
// GB_unaryop_transpose.c (phase 2), parameterized by the GB_* macros above.
GrB_Info GB_tran__identity_uint64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel compiled out via the GxB_NO_* controls
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convertCloud.h | /****************************************************************************
**
** Copyright (C) 2017 TU Wien, ACIN, Vision 4 Robotics (V4R) group
** Contact: v4r.acin.tuwien.ac.at
**
** This file is part of V4R
**
** V4R is distributed under dual licenses - GPLv3 or closed source.
**
** GNU General Public License Usage
** V4R is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published
** by the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** V4R is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** Please review the following information to ensure the GNU General Public
** License requirements will be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
**
** Commercial License Usage
** If GPL is not suitable for your project, you must purchase a commercial
** license to use V4R. Licensees holding valid commercial V4R licenses may
** use this file in accordance with the commercial license agreement
** provided with the Software or, alternatively, in accordance with the
** terms contained in a written agreement between you and TU Wien, ACIN, V4R.
** For licensing terms and conditions please contact office<at>acin.tuwien.ac.at.
**
**
** The copyright holder additionally grants the author(s) of the file the right
** to use, copy, modify, merge, publish, distribute, sublicense, and/or
** sell copies of their contributions without any restrictions.
**
****************************************************************************/
/**
* @file main.cpp
* @author Johann Prankl (prankl@acin.tuwien.ac.at)
* @date 2017
* @brief
*
*/
#ifndef KP_CONVERT_CLOUD_HPP
#define KP_CONVERT_CLOUD_HPP
#include <float.h>
#include <omp.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <v4r/common/PointTypes.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <v4r/common/impl/DataMatrix2D.hpp>
namespace v4r {
// Conversion helpers between PCL point clouds, V4R DataMatrix2D containers
// and Eigen matrices (one point per column).  The OpenMP loops below use an
// unsigned (size_t) loop index, which requires OpenMP >= 3.0 — older
// toolchains (e.g. MSVC's OpenMP 2.0) would reject it; confirm if that
// matters for supported platforms.
// Organized RGB cloud -> DataMatrix2D of PointXYZRGB (position + packed rgb).
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, DataMatrix2D<PointXYZRGB> &kp_cloud) {
kp_cloud.resize(cloud.height, cloud.width);
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZRGB &pt = cloud.points[i];
PointXYZRGB &kp = kp_cloud.data[i];
kp.pt = pt.getVector4fMap();
kp.rgb = pt.rgb;
}
}
// Organized RGB cloud -> XYZ DataMatrix2D plus a BGR image of the same size.
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, DataMatrix2D<Eigen::Vector3f> &kp_cloud,
cv::Mat_<cv::Vec3b> &image) {
kp_cloud.resize(cloud.height, cloud.width);
image = cv::Mat_<cv::Vec3b>(cloud.height, cloud.width);
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZRGB &pt = cloud.points[i];
kp_cloud.data[i] = pt.getVector3fMap();
// linear Mat_ indexing; assumes row-major layout matching the organized cloud
image(i) = cv::Vec3b(pt.b, pt.g, pt.r);
}
}
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, DataMatrix2D<Eigen::Vector3f> &kp_cloud) {
kp_cloud.resize(cloud.height, cloud.width);
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZRGB &pt = cloud.points[i];
kp_cloud.data[i] = pt.getVector3fMap();
}
}
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZ> &cloud, DataMatrix2D<Eigen::Vector3f> &kp_cloud) {
kp_cloud.resize(cloud.height, cloud.width);
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZ &pt = cloud.points[i];
kp_cloud.data[i] = pt.getVector3fMap();
}
}
// Reverse direction: DataMatrix2D -> PCL cloud, preserving rows/cols and
// marking the cloud non-dense (may contain NaN points).
inline void convertCloud(const DataMatrix2D<PointXYZRGB> &kp_cloud, pcl::PointCloud<pcl::PointXYZRGB> &pcl_cloud) {
pcl_cloud.points.resize(kp_cloud.data.size());
pcl_cloud.width = kp_cloud.cols;
pcl_cloud.height = kp_cloud.rows;
pcl_cloud.is_dense = false;
for (size_t i = 0; i < pcl_cloud.points.size(); i++) {
const PointXYZRGB &kp = kp_cloud.data[i];
pcl::PointXYZRGB &pt = pcl_cloud.points[i];
pt.getVector4fMap() = kp.pt;
pt.rgb = kp.rgb;
}
}
inline void convertCloud(const DataMatrix2D<PointXYZ> &kp_cloud, pcl::PointCloud<pcl::PointXYZ> &pcl_cloud) {
pcl_cloud.points.resize(kp_cloud.data.size());
pcl_cloud.width = kp_cloud.cols;
pcl_cloud.height = kp_cloud.rows;
pcl_cloud.is_dense = false;
for (size_t i = 0; i < pcl_cloud.points.size(); i++) {
const PointXYZ &kp = kp_cloud.data[i];
pcl::PointXYZ &pt = pcl_cloud.points[i];
pt.getVector3fMap() = kp.pt;
}
}
// -------------- SAME WITH EIGEN MATRIX-------------------
// Cloud -> 4xN matrix (homogeneous coordinates, one point per column).
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, Eigen::Matrix4Xf &matrix) {
matrix = Eigen::Matrix4Xf(4, cloud.points.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZRGB &pt = cloud.points[i];
matrix.col(i) = pt.getVector4fMap();
}
}
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, Eigen::Matrix3Xf &matrix) {
matrix = Eigen::Matrix3Xf(3, cloud.points.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZRGB &pt = cloud.points[i];
matrix.col(i) = pt.getVector3fMap();
}
}
// Indexed variants: only the points selected by `indices` are copied;
// an empty index list is rejected.
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, const std::vector<int> &indices,
Eigen::Matrix3Xf &matrix) {
if (indices.empty())
throw std::runtime_error("Indices are empty!");
matrix = Eigen::Matrix3Xf(3, indices.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < indices.size(); i++) {
const pcl::PointXYZRGB &pt = cloud.points[indices[i]];
matrix.col(i) = pt.getVector3fMap();
}
}
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZ> &cloud, Eigen::Matrix3Xf &matrix) {
matrix = Eigen::Matrix3Xf(3, cloud.points.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
const pcl::PointXYZ &pt = cloud.points[i];
matrix.col(i) = pt.getVector3fMap();
}
}
inline void convertCloud(const pcl::PointCloud<pcl::PointXYZ> &cloud, const std::vector<int> &indices,
Eigen::Matrix3Xf &matrix) {
if (indices.empty())
throw std::runtime_error("Indices are empty!");
matrix = Eigen::Matrix3Xf(3, indices.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < indices.size(); i++) {
const pcl::PointXYZ &pt = cloud.points[indices[i]];
matrix.col(i) = pt.getVector3fMap();
}
}
inline void convertCloud(const pcl::PointCloud<pcl::Normal> &normal, Eigen::Matrix3Xf &matrix) {
matrix = Eigen::Matrix3Xf(3, normal.points.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < normal.points.size(); i++) {
const pcl::Normal &pt = normal.points[i];
matrix.col(i) = pt.getNormalVector3fMap();
}
}
inline void convertCloud(const pcl::PointCloud<pcl::Normal> &normal, const std::vector<int> &indices,
Eigen::Matrix3Xf &matrix) {
if (indices.empty())
throw std::runtime_error("Indices are empty!");
matrix = Eigen::Matrix3Xf(3, indices.size());
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < indices.size(); i++) {
const pcl::Normal &pt = normal.points[indices[i]];
matrix.col(i) = pt.getNormalVector3fMap();
}
}
// Matrix -> cloud conversions produce an unorganized cloud (height == 1).
inline void convertCloud(const Eigen::Matrix4Xf &matrix, pcl::PointCloud<pcl::PointXYZRGB> &cloud) {
cloud.points.resize(matrix.cols());
cloud.width = matrix.cols();
cloud.height = 1;
cloud.is_dense = false;
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
pcl::PointXYZRGB &pt = cloud.points[i];
pt.getVector4fMap() = matrix.col(i);
}
}
inline void convertCloud(const Eigen::Matrix3Xf &matrix, pcl::PointCloud<pcl::PointXYZRGB> &cloud) {
cloud.points.resize(matrix.cols());
cloud.width = matrix.cols();
cloud.height = 1;
cloud.is_dense = false;
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
pcl::PointXYZRGB &pt = cloud.points[i];
pt.getVector3fMap() = matrix.col(i);
}
}
inline void convertCloud(const Eigen::Matrix3Xf &matrix, pcl::PointCloud<pcl::PointXYZ> &cloud) {
cloud.points.resize(matrix.cols());
cloud.width = matrix.cols();
cloud.height = 1;
cloud.is_dense = false;
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < cloud.points.size(); i++) {
pcl::PointXYZ &pt = cloud.points[i];
pt.getVector3fMap() = matrix.col(i);
}
}
inline void convertCloud(const Eigen::Matrix3Xf &matrix, pcl::PointCloud<pcl::Normal> &normals) {
normals.points.resize(matrix.cols());
normals.width = matrix.cols();
normals.height = 1;
normals.is_dense = false;
#pragma omp parallel for schedule(dynamic)
for (size_t i = 0; i < normals.points.size(); i++) {
pcl::Normal &pt = normals.points[i];
pt.getNormalVector3fMap() = matrix.col(i);
}
}
} // namespace v4r
#endif
|
GB_unaryop__identity_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_fp64
// op(A') function: GB_tran__identity_bool_fp64
// C type: bool
// A type: double
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator element-wise with a typecast:
// Cx [p] = (bool) Ax [p] for all p in [0, anz).  (Auto-generated file —
// see header note — so keep edits to comments only.)
GrB_Info GB_unop__identity_bool_fp64
(
bool *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via the GxB_NO_* controls; caller falls back to the
// generic worker
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;   // Cx [p] = (bool) Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A into C while applying the identity cast.
// The actual transpose loop is supplied by the included template
// GB_unaryop_transpose.c (phase 2), parameterized by the GB_* macros above.
GrB_Info GB_tran__identity_bool_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// kernel compiled out via the GxB_NO_* controls
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
stencil2D.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#define I(i, j) ((N) * (i) + (j))
#define NUM_ITERATIONS 1000
#define N 2000
/*
 * 5-point Jacobi stencil, copy variant: each time step reads the previous
 * grid from G2, writes the new grid into G1, then copies the new grid back
 * into G2 for the next step.
 *
 * Fixes vs. the original:
 *  - memcpy direction was reversed (memcpy(G1, G2, ...)), which overwrote
 *    the freshly computed grid with the old one each iteration, so no
 *    progress was ever made; the new values must go into G2.
 *  - element size was sizeof(long), which equals sizeof(double) only on
 *    LP64 platforms; use sizeof(double) (what the buffers actually hold).
 */
void stencil2D(double* G1, double* G2) {
  for (size_t it = 0; it < NUM_ITERATIONS; it++) {
    for (size_t i = 1; i < N - 1; i++) {
      for (size_t j = 1; j < N - 1; j++) {
        G1[I(i, j)] = 0.2 * (G2[I(i - 1, j)] + G2[I(i + 1, j)] + G2[I(i, j - 1)] +
                             G2[I(i, j + 1)] + G2[I(i, j)]);
      }
    }
    /* propagate the new grid into the read buffer for the next step */
    memcpy(G2, G1, sizeof(double) * N * N);
  }
}
/*
 * 5-point Jacobi stencil, swap variant: instead of copying the grid back
 * after each time step, the read and write buffers exchange roles, so the
 * next step reads the values just produced.
 */
void stencil2D_nocopy(double* G1, double* G2) {
  for (size_t step = 0; step < NUM_ITERATIONS; step++) {
    for (size_t row = 1; row < N - 1; row++) {
      for (size_t col = 1; col < N - 1; col++) {
        double neighborhood = G2[I(row - 1, col)] + G2[I(row + 1, col)] +
                              G2[I(row, col - 1)] + G2[I(row, col + 1)] +
                              G2[I(row, col)];
        G1[I(row, col)] = 0.2 * neighborhood;
      }
    }
    /* exchange buffer roles for the next sweep */
    double* previous = G2;
    G2 = G1;
    G1 = previous;
  }
}
/*
 * Parallel Jacobi stencil: one parallel region spans all time steps so the
 * thread team is created once; the row loop of each step is work-shared.
 *
 * Fix vs. the original: G1/G2 are shared inside the parallel region, and
 * the buffer swap was executed by EVERY thread concurrently — a data race
 * whose net result even depends on the thread count (an even number of
 * interleaved swaps can cancel out).  The swap must be done by exactly one
 * thread: "omp single" does that, and its implicit barrier (plus the
 * barrier at the end of "omp for") keeps the team in lockstep per step.
 */
void stencil2D_parallel_nocopy(double* G1, double* G2) {
#pragma omp parallel
  {
    for (size_t it = 0; it < NUM_ITERATIONS; it++) {
#pragma omp for
      for (size_t i = 1; i < N - 1; i++) {
        for (size_t j = 1; j < N - 1; j++) {
          G1[I(i, j)] = 0.2 * (G2[I(i - 1, j)] + G2[I(i + 1, j)] + G2[I(i, j - 1)] +
                               G2[I(i, j + 1)] + G2[I(i, j)]);
        }
      }
      /* exactly one thread swaps the shared pointers; implicit barrier */
#pragma omp single
      {
        double* tmp = G1;
        G1 = G2;
        G2 = tmp;
      }
    }
  }
}
/*
 * Benchmark harness: allocates two N*N grids, fills both with identical
 * random values, times one call of f, and prints the wall-clock time.
 *
 * Fixes vs. the original:
 *  - the fill loops ran to N-1 exclusive, leaving the last row and column
 *    uninitialized even though the stencil reads indices up to N-1
 *    (reading indeterminate doubles is undefined behavior);
 *  - malloc results were used unchecked;
 *  - the local `time` shadowed time(3); renamed to `start`.
 */
void test(const char* func_name, void f(double*, double*)) {
  double* G1 = malloc(sizeof(double) * N * N);
  double* G2 = malloc(sizeof(double) * N * N);
  if (G1 == NULL || G2 == NULL) {
    fprintf(stderr, "%s: allocation failure\n", func_name);
    free(G1);
    free(G2);
    return;
  }
  /* populate the whole grid, including the boundary the stencil reads */
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      G1[I(i, j)] = G2[I(i, j)] = rand();
    }
  }
  double start = omp_get_wtime();
  f(G1, G2);
  printf("%s: %fs\n", func_name, omp_get_wtime() - start);
  free(G1);
  free(G2);
}
int main(void) {
  /* Seed the RNG so each run benchmarks different grid contents. */
  srand((unsigned) time(NULL));
  test("stencil2D", stencil2D);
  test("stencil2D_nocopy", stencil2D_nocopy);
  test("stencil2D_parallel_nocopy", stencil2D_parallel_nocopy);
  return 0;
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/*
  DDPF pixel-format block of the DDS header: flag bits, FourCC
  compression code, and the RGBA channel bit masks (all fields held as
  size_t in memory after reading).
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
/*
  DDS_HEADER fields used by the decoders: surface dimensions, pitch or
  linear size, volume depth, mipmap count and the two DDSCAPS masks.
*/
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
/*
  Decoded palette of a compressed block: four entries per channel.
  CalculateColors() sets a[3]=255 in three-colour mode (all other a[]
  stay 0) -- appears to be opacity-style alpha; confirm at the use site.
*/
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
/* Four-component float vector (x,y,z,w) used by the DXT compressor. */
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
/* Three-component float vector. */
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
/*
  One candidate endpoint pair (start,end) plus its quantization error,
  as stored in the single-colour lookup tables below.
*/
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
/*
  Two candidate source blocks per input value; presumably one per DXT
  palette mode -- TODO(review): confirm against the table consumers.
*/
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
/* Common signature of the ReadDXT1/3/5 and ReadUncompressed* decoders. */
typedef MagickBooleanType
DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Single-colour lookup tables, one slot per colour component; the
  5/6/5 pattern suggests indexing by red, green, blue channel width --
  TODO(review): confirm at the use site (not visible in this chunk).
*/
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if ((ssize_t) max - min < steps) \
max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
min = MagickMax(0, (ssize_t) max - steps)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *),
WriteMipmaps(Image *,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  /* Component-wise sum of two 4-vectors. */
  destination->w = left.w + right.w;
  destination->z = left.z + right.z;
  destination->y = left.y + right.y;
  destination->x = left.x + right.x;
}
static inline void VectorClamp(DDSVector4 *value)
{
  /* Clamp every component of the 4-vector into [0, 1]. */
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
}
static inline void VectorClamp3(DDSVector3 *value)
{
  /* Clamp every component of the 3-vector into [0, 1]. */
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
}
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  /* Narrow a 4-vector to a 3-vector, discarding w. */
  destination->z = source.z;
  destination->y = source.y;
  destination->x = source.x;
}
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  /* Copy all four components via whole-struct assignment. */
  *destination = source;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  /* destination = c - a*b, per component. */
  destination->w = c.w - (a.w * b.w);
  destination->z = c.z - (a.z * b.z);
  destination->y = c.y - (a.y * b.y);
  destination->x = c.x - (a.x * b.x);
}
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* Component-wise (Hadamard) product of two 4-vectors. */
  destination->w = left.w * right.w;
  destination->z = left.z * right.z;
  destination->y = left.y * right.y;
  destination->x = left.x * right.x;
}
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* Component-wise (Hadamard) product of two 3-vectors. */
  destination->z = left.z * right.z;
  destination->y = left.y * right.y;
  destination->x = left.x * right.x;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  /* destination = a*b + c, per component. */
  destination->w = (a.w * b.w) + c.w;
  destination->z = (a.z * b.z) + c.z;
  destination->y = (a.y * b.y) + c.y;
  destination->x = (a.x * b.x) + c.x;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  /* destination = a*b + c, per component. */
  destination->z = (a.z * b.z) + c.z;
  destination->y = (a.y * b.y) + c.y;
  destination->x = (a.x * b.x) + c.x;
}
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  /* Component-wise reciprocal; no zero guard (inf on zero input). */
  destination->w = 1.0f / value.w;
  destination->z = 1.0f / value.z;
  destination->y = 1.0f / value.y;
  destination->x = 1.0f / value.x;
}
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* destination = left - right, per component. */
  destination->w = left.w - right.w;
  destination->z = left.z - right.z;
  destination->y = left.y - right.y;
  destination->x = left.x - right.x;
}
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* destination = left - right, per component. */
  destination->z = left.z - right.z;
  destination->y = left.y - right.y;
  destination->x = left.x - right.x;
}
static inline void VectorTruncate(DDSVector4 *value)
{
  /* Round each component toward zero: floor for positive values,
     ceil otherwise (keeps float storage). */
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
  /* Round each component toward zero: floor for positive values,
     ceil otherwise (keeps float storage). */
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
}
/*
  Expand the two packed 5:6:5 endpoint colours of a DXT block and derive
  the two interpolated palette entries.  All a[] entries start at 0; in
  three-colour mode entry 3 additionally gets a[3] = 255.
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);
  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);
  if (ignoreAlpha == MagickFalse && c0 <= c1)
    {
      /* three-colour mode: midpoint third entry, entry 3 flagged */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
  else
    {
      /* four-colour mode: 2/3-1/3 blends of the two endpoints */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
}
/*
  Quantize 16 alpha samples against the DXT alpha codebook spanned by
  [min, max] with `steps` interpolated levels.  Writes the chosen code
  index per sample into indices[] (samples marked -1 get index 0 and
  contribute no error) and returns the total squared error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  size_t
    total_error;

  ssize_t
    i;

  /* Build the codebook: the two endpoints, the interpolated levels in
     slots 2..steps, and the fixed 0/255 pair in slots 6/7 (overwritten
     by the interpolation when steps == 7). */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  total_error = 0;
  for (i=0; i < 16; i++)
  {
    size_t
      best,
      best_index,
      j,
      value;

    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }
    value = (size_t) alphas[i];
    best = SIZE_MAX;
    best_index = 0;
    for (j=0; j < 8; j++)
    {
      size_t
        dist;

      /* Unsigned subtraction may wrap, but squaring is still exact
         modulo 2^w: (2^w - d)^2 == d^2 (mod 2^w) and d <= 255. */
      dist = value - (size_t) codes[j];
      dist *= dist;
      if (dist < best)
        {
          best = dist;
          best_index = j;
        }
    }
    indices[i] = (unsigned char) best_index;
    total_error += best;
  }
  return total_error;
}
/*
  Cluster-fit compressor for one 4x4 DXT color block.  The points,
  sorted along a principal axis, are split into four contiguous
  clusters mapped to the palette entries {start, 2/3 mix, 1/3 mix,
  end}; for every split (i,j,k) the optimal endpoints are solved in
  closed form (weighted least squares) and the perceptual error,
  weighted by 'metric', is measured.  Up to 8 orderings along
  successively refined axes are tried.  The winning endpoints are
  returned in *start/*end and per-texel palette indices in indices[].
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Cluster weights 1/3 and 2/3; the .w lanes carry their squares so a
     single vector op accumulates both the weighted sums and the
     normal-equation coefficients. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* RGB565 quantization grid (5/6/5 bits per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* Each thread scans all splits whose first cluster ends at i. */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      /* part0 = weighted sum over cluster 0 (sorted indices < i). */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);

      for (j=(size_t) i;;)
      {
        /* j == 0 forces the first point into cluster 2 so the empty
           split is not revisited; kmin starts cluster 3 after it. */
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }

        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = whatever is left for cluster 3. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          /* Accumulate the normal-equation terms; the .w lanes hold
             alpha^2 / beta^2 / alpha*beta sums by construction. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          /* Solve the 2x2 least-squares system for endpoints a, b. */
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both candidate endpoints to the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Metric-weighted squared error of this split. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);

          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-test inside the critical section: another thread
                   may have lowered bestError since the check above. */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          /* Grow cluster 2 by one point and retry. */
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        /* Grow cluster 1 by one point and retry. */
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* No split of this ordering improved on earlier iterations: stop. */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    /* Refine: re-sort along the current best endpoint axis. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }

  /* Tag each sorted position with its palette entry (0,2,3,1 in
     cluster order), then map back to raster order. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}
/*
  Range-fit compressor for one 4x4 DXT color block: project all points
  onto the principal axis, take the extreme points as candidate
  endpoints, snap them to the RGB565 grid, and assign every point to
  the nearest of the four derived palette entries (distances weighted
  per channel by 'metric').  Faster but lower quality than the cluster
  fit; also used when the block is empty.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* RGB565 quantization grid (5/6/5 bits per channel). */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Endpoints = points with extreme projections onto the axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Clamp to [0,1] and snap both endpoints to the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Palette: the endpoints plus the 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  /* Assign each point to the closest palette entry. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  /* Translate from compression order back to raster order. */
  RemapIndices(map, closest, indices);
}
/*
  Pick optimal RGB565 endpoints for a single-color block using the
  precomputed per-channel lookup tables.  Two candidate table entries
  (i = 0, 1) are tried; the pair with the smallest summed squared error
  wins.  The winning palette index (0 or 2) is written to *index.
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  size_t
    bestError = SIZE_MAX;

  ssize_t
    i;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      c,
      error;

    error = 0;
    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > bestError)
      continue;

    /* Endpoints are stored as raw 5/6/5-bit values; normalize. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    *index = (unsigned char) (2*i);
    bestError = error;
  }
}
/*
  Estimate the principal eigenvector of the symmetric 3x3 covariance
  matrix (packed upper triangle: xx,xy,xz,yy,yz,zz) with eight rounds
  of power iteration, renormalizing by the largest component of the
  iterate each round.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row[3],
    v;

  ssize_t
    i;

  /* Expand the packed triangle into full matrix rows. */
  row[0].x = covariance[0];
  row[0].y = covariance[1];
  row[0].z = covariance[2];
  row[0].w = 0.0f;

  row[1].x = covariance[1];
  row[1].y = covariance[3];
  row[1].z = covariance[4];
  row[1].w = 0.0f;

  row[2].x = covariance[2];
  row[2].y = covariance[4];
  row[2].z = covariance[5];
  row[2].w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      scale;

    /* w = M * v, accumulated row by row in the same order as before. */
    w.x = row[0].x*v.x;
    w.y = row[0].y*v.x;
    w.z = row[0].z*v.x;
    w.w = row[0].w*v.x;

    w.x = (row[1].x*v.y)+w.x;
    w.y = (row[1].y*v.y)+w.y;
    w.z = (row[1].z*v.y)+w.z;
    w.w = (row[1].w*v.y)+w.w;

    w.x = (row[2].x*v.z)+w.x;
    w.y = (row[2].y*v.z)+w.y;
    w.z = (row[2].z*v.z)+w.z;
    w.w = (row[2].w*v.z)+w.w;

    /* Normalize by the largest component (guarded reciprocal). */
    scale = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
    v.x = w.x*scale;
    v.y = w.y*scale;
    v.z = w.z*scale;
    v.w = w.w*scale;
  }

  VectorCopy43(v,principle);
}
/*
  Compute the weighted covariance of a point cloud.  The result is the
  upper triangle of the symmetric 3x3 matrix, packed into covariance[]
  as xx, xy, xz, yy, yz, zz.  Each point's .w component is its weight.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    mean;

  float
    weight_sum;

  size_t
    n;

  /* Weighted centroid of the point cloud. */
  weight_sum = 0.0f;
  VectorInit3(mean,0.0f);
  for (n=0; n < count; n++)
  {
    weight_sum += points[n].w;
    mean.x += (points[n].x * points[n].w);
    mean.y += (points[n].y * points[n].w);
    mean.z += (points[n].z * points[n].w);
  }
  /* Guard against division by (near) zero total weight. */
  if (weight_sum > 1.192092896e-07F)
    {
      mean.x /= weight_sum;
      mean.y /= weight_sum;
      mean.z /= weight_sum;
    }

  for (n=0; n < 6; n++)
    covariance[n] = 0.0f;

  /* Accumulate weighted outer products of the centered points. */
  for (n=0; n < count; n++)
  {
    DDSVector3
      d,
      wd;

    d.x = points[n].x - mean.x;
    d.y = points[n].y - mean.y;
    d.z = points[n].z - mean.z;

    wd.x = points[n].w * d.x;
    wd.y = points[n].w * d.y;
    wd.z = points[n].w * d.z;

    covariance[0] += d.x*wd.x;
    covariance[1] += d.x*wd.y;
    covariance[2] += d.x*wd.z;
    covariance[3] += d.y*wd.y;
    covariance[4] += d.y*wd.z;
    covariance[5] += d.z*wd.z;
  }
}
/*
  Build the point ordering for one cluster-fit iteration: sort point
  indices by their projection onto 'axis' (stable insertion sort) into
  order[16*iteration ..].  If the resulting ordering duplicates that of
  any earlier iteration, return MagickFalse so the caller can stop
  iterating.  Otherwise cache each point, pre-multiplied by its weight
  (with w itself in the fourth lane), in pointsWeights[] in sorted
  order, accumulate their total into *xSumwSum, and return MagickTrue.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  /* Each iteration owns a 16-entry slice of order[]. */
  o = order + (16*iteration);

  /* Project every point onto the axis; start with identity order. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Stable insertion sort of the projections, permuting o[] in step;
     stability matters because the ordering drives the cluster split. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Bail out if an earlier iteration already produced this ordering. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  /* Cache weighted points in sorted order and total them. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the blob starts with the 4-byte DDS magic
  "DDS " (note the trailing space), MagickFalse otherwise.
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  if ((length >= 4) && (LocaleNCompare((char *) magick,"DDS ",4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read a DirectDraw Surface image: validate the 128-byte magic+header,
  select a decoder for the pixel format (uncompressed RGB / RGBA /
  luminance, or FOURCC DXT1/DXT3/DXT5), then decode one frame per
  cubemap face or volume slice.  Returns the image list, or NULL with
  'exception' set on failure.
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  CompressionType
    compression;

  DDSDecoder
    *decoder;

  DDSInfo
    dds_info;

  Image
    *image;

  MagickBooleanType
    cubemap = MagickFalse,
    matte,
    status,
    volume = MagickFalse;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /* Surface data starts right after the 4-byte magic + 124-byte header. */
  (void) SeekBlob(image, 128, SEEK_SET);

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          matte = MagickTrue;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          /* Fix: was MagickTrue.  Without DDPF_ALPHAPIXELS the data
             carries no alpha channel (ReadUncompressedRGB forces every
             pixel opaque), so the image must not claim a matte. */
          matte = MagickFalse;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          matte = MagickFalse;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          /* DXT1 may still produce punch-through alpha; the decoder
             flips image->matte on when it encounters it. */
          matte = MagickFalse;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          matte = MagickTrue;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          matte = MagickTrue;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap != MagickFalse)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume != MagickFalse)
    num_images = dds_info.depth;

  /* Sanity-bound the frame count against the blob size. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        /* Start a new image */
        AcquireNextImage(image_info,image);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->matte = matte;
    image->compression = compression;
    image->columns = dds_info.width;
    image->rows = dds_info.height;
    image->storage_class = DirectClass;
    image->endian = LSBEndian;
    image->depth = 8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows);
    if (status == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(DestroyImageList(image));
      }
    (void) SetImageBackgroundColor(image);

    /* Decode one face/slice; on failure keep whatever decoded so far. */
    if ((decoder)(image, &dds_info, exception) != MagickTrue)
      {
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }

  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  Parse the DDS file header into *dds_info.  On entry the blob may be
  anywhere; the routine seeks past the 4-byte magic itself.  Returns
  MagickFalse when the header or pixel-format size fields are wrong or
  a required flag is missing.  On success the stream is left just
  before the surface data (offset 128).
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    required,
    size;

  /* Skip the magic; the DDS_HEADER dwSize field must be 124. */
  (void) SeekBlob(image, 4, SEEK_SET);
  size = ReadBlobLSBLong(image);
  if (size != 124)
    return MagickFalse;

  dds_info->flags = ReadBlobLSBLong(image);

  /* Width, height and pixel format are mandatory. */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  /* dwReserved1[11] */
  (void) SeekBlob(image, 44, SEEK_CUR);

  /* The embedded DDS_PIXELFORMAT dwSize field must be 32. */
  size = ReadBlobLSBLong(image);
  if (size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  /* dwCaps3, dwCaps4, dwReserved2 */
  (void) SeekBlob(image, 12, SEEK_CUR);

  return MagickTrue;
}
/*
  Decode a DXT1 (BC1) compressed image: each 8-byte block encodes a
  4x4 texel patch as two RGB565 endpoints followed by 32 2-bit palette
  indices.  Blocks at the right/bottom edge are cropped to the image
  bounds.  Returns the result of skipping any trailing mipmap levels.
*/
static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  PixelPacket
    *q;

  ssize_t
    i,
    x;

  size_t
    bits;

  ssize_t
    j,
    y;

  unsigned char
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);

      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      /* ignoreAlpha=MagickFalse: c0 <= c1 selects punch-through mode. */
      CalculateColors(c0, c1, &colors, MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        break;

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if (((x + i) < (ssize_t) image->columns) &&
              ((y + j) < (ssize_t) image->rows))
            {
              code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
              /* colors.a[] holds an opacity (255 only for the
                 transparent three-color-mode entry). */
              SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
              if ((colors.a[code] != 0) && (image->matte == MagickFalse))
                image->matte=MagickTrue;  /* Correct matte */
              /* q only advances for in-bounds texels, matching the
                 cropped patch requested above. */
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
    if (EOFBlob(image) != MagickFalse)
      break;
  }

  return(SkipDXTMipmaps(image,dds_info,8,exception));
}
/*
  Decode a DXT3 (BC2) compressed image: each 16-byte block holds 64
  bits of explicit 4-bit-per-texel alpha followed by a DXT1-style
  color block decoded in four-color mode (CalculateColors is told to
  ignore punch-through alpha).  Returns the result of skipping any
  trailing mipmap levels.
*/
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  ssize_t
    j,
    y;

  PixelPacket
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);

      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3. */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        break;

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                alpha));
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
    if (EOFBlob(image) != MagickFalse)
      break;
  }

  return(SkipDXTMipmaps(image,dds_info,16,exception));
}
/*
  Decode a DXT5 (BC3) compressed image: each 16-byte block holds two
  8-bit alpha endpoints (a0, a1), 48 bits of 3-bit alpha indices, then
  a DXT1-style color block in four-color mode.  The alpha code book is
  fully interpolated when a0 > a1, otherwise four interpolants plus
  the explicit codes 0 and 255.  Returns the result of skipping any
  trailing mipmap levels.
*/
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  ssize_t
    j,
    y;

  MagickSizeType
    alpha_bits;

  PixelPacket
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);

      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /* Read alpha values (8 bytes): endpoints + 48 index bits,
         assembled little-endian into a 64-bit accumulator. */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        break;

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
              /* Extract alpha value */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                /* 8-code mode: six interpolated values. */
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                /* 6-code mode: four interpolated values. */
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                alpha));
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
    if (EOFBlob(image) != MagickFalse)
      break;
  }

  return(SkipDXTMipmaps(image,dds_info,16,exception));
}
/*
  Decode uncompressed pixel data without alpha: 8-bit luminance,
  16-bit RGB565 (validated against the bit masks), or 24/32-bit pixels
  stored as B,G,R(,X) bytes.  Alpha is forced fully opaque.  Returns
  the result of skipping any trailing mipmap levels.

  NOTE(review): SkipRGBMipmaps is always called with 3 bytes/pixel,
  but the 8-, 16- and 32-bit variants store 1, 2 and 4 bytes per
  mipmap pixel — confirm whether mipmapped non-24-bit files decode
  all faces correctly.
*/
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    /* Only the RGB565 16-bit layout is supported here. */
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);

  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);

    if (q == (PixelPacket *) NULL)
      return MagickFalse;

    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image)));
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
           /* Expand 5/6/5-bit fields to 0..255 by scaling. */
           color=ReadBlobShort(image);
           SetPixelRed(q,ScaleCharToQuantum((unsigned char)
             (((color >> 11)/31.0)*255)));
           SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
             ((((unsigned short)(color << 5) >> 10)/63.0)*255)));
           SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
             ((((unsigned short)(color << 11) >> 11)/31.0)*255)));
        }
      else
        {
          /* Bytes are stored B, G, R; 32-bit adds a padding byte. */
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      SetPixelAlpha(q,QuantumRange);
      q++;
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }

  return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  Decode uncompressed pixel data with alpha.  For 16-bit data the bit
  masks select the variant; 'alphaBits' is used as a format tag:
  1 = ARGB1555, 2 = 8-bit luminance + 8-bit alpha, 4 = ARGB4444.
  Anything else 16-bit is rejected.  Other bit counts are read as
  B,G,R,A bytes.  Returns the result of skipping trailing mipmaps.

  NOTE(review): SkipRGBMipmaps is called with 4 bytes/pixel even for
  the 2-byte 16-bit variants — confirm mipmapped 16-bit files.
*/
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleMatteType);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }

  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);

    if (q == (PixelPacket *) NULL)
      return MagickFalse;

    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
           color=ReadBlobShort(image);
           if (alphaBits == 1)
             {
               /* ARGB1555: 1-bit alpha, 5 bits per channel. */
               SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
               SetPixelRed(q,ScaleCharToQuantum((unsigned char)
                 ((((unsigned short)(color << 1) >> 11)/31.0)*255)));
               SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
                 ((((unsigned short)(color << 6) >> 11)/31.0)*255)));
               SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
                 ((((unsigned short)(color << 11) >> 11)/31.0)*255)));
             }
          else if (alphaBits == 2)
            {
               /* A8L8: high byte alpha, low byte luminance. */
               SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                 (color >> 8)));
               SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
            }
          else
            {
               /* ARGB4444: 4 bits per component. */
               SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                 (((color >> 12)/15.0)*255)));
               SetPixelRed(q,ScaleCharToQuantum((unsigned char)
                 ((((unsigned short)(color << 4) >> 12)/15.0)*255)));
               SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
                 ((((unsigned short)(color << 8) >> 12)/15.0)*255)));
               SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
                 ((((unsigned short)(color << 12) >> 12)/15.0)*255)));
            }
        }
      else
        {
          /* Bytes are stored B, G, R, A. */
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
        }
      q++;
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }

  return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = SetMagickInfo("DDS");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->magick_module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT1");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->magick_module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT5");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->magick_module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
  Map the 16 per-texel palette indices from compression order back to
  raster order.  map[n] is the source position for texel n, or -1 for
  texels outside the image, which receive index 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    n;

  for (n = 0; n < 16; n++)
    target[n] = (map[n] == -1) ? 3 : source[map[n]];
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  Skip the mipmap chain of a DXTn-compressed surface so the stream is
  positioned at the next cubemap face or slice.  Mipmaps are only
  present for textures and cube maps; each level stores
  ceil(w/4)*ceil(h/4) blocks of texel_size bytes.  Returns MagickFalse
  (with a warning) if the stream already hit EOF.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if ((dds_info->ddscaps1 & DDSCAPS_MIPMAP) &&
      ((dds_info->ddscaps1 & DDSCAPS_TEXTURE) ||
       (dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)))
    {
      MagickOffsetType
        offset;

      size_t
        h,
        w;

      ssize_t
        i;

      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);
      /* The mipmap count includes the full-size image, so start at 1. */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && (w != 0) && (h != 0); i++)
      {
        offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        if ((w == 1) && (h == 1))
          break;
        w = DIV2(w);
        h = DIV2(h);
      }
    }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Skip the mipmap chain of an uncompressed surface so the stream is
  positioned at the next cubemap face or slice.  Mipmaps are only
  present for textures and cube maps; each level is w*h*pixel_size
  bytes.  Returns MagickFalse (with an error) on premature EOF.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if ((dds_info->ddscaps1 & DDSCAPS_MIPMAP) &&
      ((dds_info->ddscaps1 & DDSCAPS_TEXTURE) ||
       (dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)))
    {
      MagickOffsetType
        offset;

      size_t
        h,
        w;

      ssize_t
        i;

      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);
      /* The mipmap count includes the full-size image, so start at 1. */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && (w != 0) && (h != 0); i++)
      {
        offset = (MagickOffsetType) w * h * pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        /* Note: dimensions are halved before the 1x1 check here,
           unlike the DXT variant — order preserved intentionally. */
        w = DIV2(w);
        h = DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
/*
  Remove the format registrations made by RegisterDDSImage (the DDS,
  DXT1 and DXT5 aliases) from the list of supported formats.
*/
ModuleExport void UnregisterDDSImage(void)
{
  static const char
    *names[] = { "DDS", "DXT1", "DXT5" };

  size_t
    i;

  for (i=0; i < (sizeof(names)/sizeof(names[0])); i++)
    (void) UnregisterMagickInfo(names[i]);
}
/*
  Encode the 8-byte DXT5 alpha block.  Both the 5-step code book (with
  implicit 0 and 255 codes) and the 7-step code book are tried; the fit
  with the lower quantization error is written.  When the 7-step fit
  wins, its indices are remapped and the endpoints swapped so the
  written (a0, a1) order selects the correct decode mode.  The 16
  3-bit indices are packed little-endian into two 3-byte groups.
*/
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  /* Fit both code books and measure their errors. */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);

  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);

  if (err7 < err5)
  {
    /* 7-step wins: translate its indices into the on-disk numbering
       that matches the swapped endpoint order written below. */
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;

      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
    }

    min5 = max7;
    max5 = min7;
  }

  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);

  /* Pack 16 3-bit indices: two groups of 8 indices -> 24 bits each,
     emitted as three little-endian bytes per group. */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }

    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  Compress one 4x4 color block: compute the weighted covariance of the
  points and its principal axis, fit endpoints with either the fast
  range fit (empty block, or cluster fit disabled) or the higher
  quality cluster fit, then emit the packed color block.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
  DDSVector3
    axis,
    first,
    last;

  DDSVector4
    weights;

  float
    cov[16];

  unsigned char
    codes[16];

  /* Unit metric: all channels weighted equally. */
  VectorInit(weights,1.0f);
  VectorInit3(first,0.0f);
  VectorInit3(last,0.0f);

  ComputeWeightedCovariance(count,points,cov);
  ComputePrincipleComponent(cov,&axis);

  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,axis,weights,&first,&last,codes);
  else
    CompressClusterFit(count,points,map,axis,weights,&first,&last,codes);

  WriteIndices(image,first,last,codes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteBMPImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image)
{
  /*
    Write `image' as a DDS file.  Pixel data is DXT5-compressed by
    default, DXT1 when the image has no alpha channel (or when DXT1 is
    requested via the magick name, -compress, or "dds:compression"),
    or uncompressed BGR(A) when "none" is requested.  When the image
    dimensions are powers of two, a chain of mipmaps is appended
    (bounded by the "dds:mipmaps" option).

    Fix: the blob is now closed before returning on a mipmap-write
    failure (it was previously leaked on that path).
  */
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace);
  /*
    Select the pixel format and compression.
  */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (!image->matte)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else
    if (image_info->compression == NoCompression)
      pixelFormat=DDPF_RGB;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /*
    Cluster fit (and alpha weighting, DXT5 only) apply only to the
    compressed path.
  */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  /*
    Mipmaps are generated only for power-of-two dimensions.
  */
  maxMipmaps=SIZE_MAX;
  mipmaps=0;
  if ((image->columns & (image->columns - 1)) == 0 &&
      (image->rows & (image->rows - 1)) == 0)
    {
      option=GetImageOption(image_info,"dds:mipmaps");
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    &image->exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,pixelFormat,compression,mipmaps,
      clusterFit,weightByAlpha,&image->exception) == MagickFalse))
    {
      /* Close the blob on failure too; it was previously leaked here. */
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  Write the 128-byte DDS file header: the "DDS " magic followed by the
  124-byte DDS_HEADER (which embeds a 32-byte DDS_PIXELFORMAT), sized
  and flagged for either a FourCC-compressed (DXT1/DXT5) or an
  uncompressed RGB(A) surface with `mipmaps' additional mip levels.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MaxTextExtent];
ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
/* Compressed surfaces report a linear size; uncompressed ones a pitch. */
if (format == DDPF_FOURCC)
flags=flags | DDSD_LINEARSIZE;
else
flags=flags | DDSD_PITCH;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->matte)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124); /* dwSize of DDS_HEADER */
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (pixelFormat == DDPF_FOURCC)
{
/* Compressed DDS requires linear compressed size of first image */
/* DXT1 packs each 4x4 block into 8 bytes, DXT5 into 16. */
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
else /* DXT5 */
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
}
else
{
/* Uncompressed DDS requires byte pitch of first image */
if (image->matte != MagickFalse)
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
else
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
}
(void) WriteBlobLSBLong(image,0x00); /* dwDepth (volume textures only) */
/* dwMipMapCount includes the base image, hence the +1. */
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
/* The 44 reserved bytes carry a software tag. */
(void) memset(software,0,sizeof(software));
(void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32); /* dwSize of DDS_PIXELFORMAT */
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for(i=0;i < 5;i++) /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00); /* no FourCC code */
/* Channel bit count and RGBA masks; pixels are written BGR(A). */
if (image->matte != MagickFalse)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
/*
  Compress the image as DXT1 or DXT5: walk the image in 4x4 texel
  blocks, gather each block's alphas and its set of distinct weighted
  colors, then emit an alpha block (DXT5 only) and a color block per
  tile.
*/
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
const PixelPacket
*p;
ssize_t
x;
ssize_t
i,
y,
bx,
by;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
/* Clip partial blocks at the right/bottom image edges. */
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const PixelPacket *) NULL)
break;
/* Texels outside a partial block keep these -1 sentinels. */
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
/* DXT1 has no alpha channel; treat every texel as opaque. */
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(p));
else
alpha = 255;
if (compression == FOURCC_DXT5)
{
/* Track ranges for both DXT5 alpha modes: "7" uses the full
   range, "5" excludes the explicit 0 and 255 codes. */
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
alphas[4*by + bx] = (size_t)alpha;
/* Normalize the color to [0,1]; w is the fit weight, optionally
   scaled by alpha so transparent texels matter less. */
point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p++;
/* Merge duplicate colors, accumulating their weights.  For DXT1,
   texels below alpha 128 are kept distinct (they become the
   transparent code). */
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
}
}
/* The fit works with sqrt(weight) -- see the accumulation above. */
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression, const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  /*
    Dispatch on the pixel format: FourCC surfaces get DXT compression,
    anything else is written as raw BGR(A) bytes.
  */
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
static inline size_t ClampToLimit(const float value,
  const size_t limit)
{
  /*
    Round `value' to the nearest integer and clamp it to [0, limit].

    Fix: the original stored the rounded value in a size_t and then
    tested `result < 0.0f', which is always false for an unsigned value;
    negative inputs wrapped around and were clamped to `limit' instead
    of 0 (and converting a negative float to an unsigned type is
    undefined behavior).  Reject negative and over-limit inputs before
    the unsigned conversion.
  */
  if (value <= 0.0f)
    return(0);
  if (value >= (float) limit)
    return(limit);
  return((size_t) (value+0.5f));
}
static inline size_t ColorTo565(const DDSVector3 point)
{
  /*
    Pack an RGB color with components in [0,1] into a 16-bit 5:6:5
    value (red in the high bits).
  */
  const size_t
    red   = ClampToLimit(31.0f*point.x,31),
    green = ClampToLimit(63.0f*point.y,63),
    blue  = ClampToLimit(31.0f*point.z,31);

  return((red << 11) | (green << 5) | blue);
}
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char* indices)
{
  /*
    Write a DXT color block: two 16-bit (5:6:5) endpoint colors followed
    by 32 bits of 2-bit palette indices.  DXT encodes the block mode in
    the endpoint order, so when the first endpoint is smaller the
    endpoints are swapped and each index is flipped to keep the selected
    colors unchanged; equal endpoints force every index to 0.
  */
  size_t
    color_a,
    color_b;

  ssize_t
    i;

  unsigned char
    remapped[16];

  color_a=ColorTo565(start);
  color_b=ColorTo565(end);
  for (i=0; i < 16; i++)
  {
    if (color_a > color_b)
      remapped[i]=indices[i];
    else
      if (color_a == color_b)
        remapped[i]=0;  /* degenerate block: every texel selects color 0 */
      else
        remapped[i]=(indices[i] ^ 0x1) & 0x3;
  }
  if (color_a < color_b)
    Swap(color_a,color_b);
  (void) WriteBlobByte(image,(unsigned char) (color_a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (color_a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (color_b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (color_b >> 8));
  for (i=0; i < 16; i+=4)
    (void) WriteBlobByte(image,remapped[i] | (remapped[i+1] << 2) |
      (remapped[i+2] << 4) | (remapped[i+3] << 6));
}
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  /*
    Append `mipmaps' progressively halved copies of `image' to the open
    blob.  Returns MagickFalse if a mip level cannot be resized.

    Fix: clusterFit and weightByAlpha were passed to WriteImageData in
    swapped order (both are MagickBooleanType, so the compiler could not
    catch it); the declared parameter order is (..., clusterFit,
    weightByAlpha, exception).
  */
  Image*
    resize_image;

  ssize_t
    i;

  size_t
    columns,
    rows;

  columns = image->columns;
  rows = image->rows;
  for (i=0; i< (ssize_t) mipmaps; i++)
  {
    /* Each level is half the previous size (DIV2 presumably floors at 1). */
    resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,
      1.0,exception);
    if (resize_image == (Image *) NULL)
      return(MagickFalse);
    /* Share the original image's blob so the level is appended to the
       same output stream. */
    DestroyBlob(resize_image);
    resize_image->blob=ReferenceBlob(image->blob);
    WriteImageData(resize_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    resize_image=DestroyImage(resize_image);
    columns = DIV2(columns);
    rows = DIV2(rows);
  }
  return(MagickTrue);
}
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
  const ssize_t* map)
{
  /*
    Encode a 4x4 block whose texels all share one color: look up the
    optimal endpoint pair for that color, give every texel the same
    palette index, and write the resulting DXT color block.
  */
  DDSVector3
    end,
    start;

  ssize_t
    i;

  unsigned char
    index,
    indexes[16],
    indices[16],
    rgb[3];

  rgb[0]=(unsigned char) ClampToLimit(255.0f*points->x,255);
  rgb[1]=(unsigned char) ClampToLimit(255.0f*points->y,255);
  rgb[2]=(unsigned char) ClampToLimit(255.0f*points->z,255);
  index=0;
  ComputeEndPoints(DDS_LOOKUP,rgb,&start,&end,&index);
  for (i=0; i < 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  /*
    Emit the pixels as raw bytes in BGR order (BGRA when the image has
    an alpha channel), one row at a time.
  */
  const PixelPacket
    *q;

  ssize_t
    column,
    row;

  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(q)));
      if (image->matte)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(q)));
      q++;
    }
  }
}
|
GB_unop__acosh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__acosh_fc32_fc32
// op(A') function: GB_unop_tran__acosh_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = cacoshf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacoshf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = cacoshf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cacoshf (Ax [p]) for all entries (auto-generated kernel; the
// GB_* macros above configure the shared template).  Cx and Ax may alias.
GrB_Info GB_unop_apply__acosh_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type was disabled at compile time; use the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full or sparse case: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = cacoshf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = cacoshf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cacoshf (A'): transpose, typecast, and apply; the loop itself lives
// in the shared template GB_unop_transpose.c, driven by the GB_* macros
// defined above (auto-generated kernel).
GrB_Info GB_unop_tran__acosh_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sha1-avx2.c | #define _BSD_SOURCE
#include <stdint.h>
#include <iso646.h>
#include <stdbool.h>
#include <string.h>
#include "proofofwork-private.h"
// SHA-1 round functions (FIPS 180-4), computed on 8 lanes at once:
// f00 = Ch, f20 = Parity, f40 = Maj, f60 = Parity (identical to f20).
static inline __m256i sha1_f00(__m256i b, __m256i c, __m256i d) { return (b & c) | (~ b & d); }
static inline __m256i sha1_f20(__m256i b, __m256i c, __m256i d) { return b ^ c ^ d; }
static inline __m256i sha1_f40(__m256i b, __m256i c, __m256i d) { return (b & c) | (c & d) | (d & b); }
static inline __m256i sha1_f60(__m256i b, __m256i c, __m256i d) { return b ^ c ^ d; }
// standard SHA-1 round constants K for rounds 0-19, 20-39, 40-59, 60-79
static const uint32_t sha1_k00 = 0x5a827999;
static const uint32_t sha1_k20 = 0x6ed9eba1;
static const uint32_t sha1_k40 = 0x8f1bbcdc;
static const uint32_t sha1_k60 = 0xca62c1d6;
// standard SHA-1 initial hash value H0..H4
static const uint32_t sha1_a0 = 0x67452301;
static const uint32_t sha1_b0 = 0xefcdab89;
static const uint32_t sha1_c0 = 0x98badcfe;
static const uint32_t sha1_d0 = 0x10325476;
static const uint32_t sha1_e0 = 0xc3d2e1f0;
// One SHA-1 round on 8 lanes with a per-lane message word `w':
// e += rol(a,5) + f(b,c,d) + w + k; b = rol(b,30).  The caller rotates
// the roles of a..e between rounds instead of shuffling values.
static inline void sha1_roundv(__m256i a, __m256i *b, __m256i c, __m256i d, __m256i *e, __m256i w, uint32_t k, __m256i (*f)(__m256i, __m256i, __m256i)) {
*e = _mm256_add_epi32( *e,
_mm256_add_epi32( mm256_rol_epi32(a, 5),
_mm256_add_epi32( f(*b, c, d),
_mm256_add_epi32( w,
_mm256_set1_epi32(k) ))));
*b = mm256_rol_epi32(*b, 30);
}
// Same as sha1_roundv but with a scalar message word shared by all lanes,
// so w + k folds into a single broadcast constant.
static inline void sha1_round(__m256i a, __m256i *b, __m256i c, __m256i d, __m256i *e, uint32_t w, uint32_t k, __m256i (*f)(__m256i, __m256i, __m256i)) {
*e = _mm256_add_epi32( *e,
_mm256_add_epi32( mm256_rol_epi32(a, 5),
_mm256_add_epi32( f(*b, c, d),
_mm256_set1_epi32(w + k) )));
*b = mm256_rol_epi32(*b, 30);
}
uint64_t pow_sha1_count = 0; // for benchmark
/*
 * Brute-force search for a single-block message whose SHA-1 digest matches
 * `target' under `mask' (both read as five big-endian 32-bit words).
 * `buffer' holds the message template of `size' bytes; `indices' lists up
 * to pow_indices_length byte positions that are free to range over
 * `alphabet'.  vector_width candidates (8 lanes of 32 bits) are hashed per
 * AVX2 iteration, with the outermost varying position folded into the
 * message words via precomputed XOR deltas.  On success the winning
 * message is copied back into `buffer' and true is returned.
 */
bool pow_sha1_mine(uint8_t const *mask, uint8_t const *target, uint8_t *buffer, uint64_t size, int32_t const *indices) {
// check arguments
static_assert (__BYTE_ORDER == __LITTLE_ENDIAN, "");
if (mask == NULL) return false;
if (target == NULL) return false;
if (buffer == NULL) return false;
if (indices == NULL) return false;
for (int i = 0; i < pow_indices_length; ++ i) {
if (indices[i] < -1 or (int64_t)size <= indices[i]) return false;
}
if (indices[0] == -1) return false;
// NOTE(review): sizeof(uint64_t)/CHAR_BIT is 1, so this only reserves one
// byte for SHA-1's 8-byte length field; the word layout below fixes the
// length in words 14-15, which requires size <= 55 -- confirm callers
// never exceed that.
if (size > pow_sha1_block_length - sizeof(uint64_t) / CHAR_BIT - 1) return false;
// load hash
const uint32_t mask_a = be32toh(((uint32_t *)mask)[0]);
const uint32_t mask_b = be32toh(((uint32_t *)mask)[1]);
const uint32_t mask_c = be32toh(((uint32_t *)mask)[2]);
const uint32_t mask_d = be32toh(((uint32_t *)mask)[3]);
const uint32_t mask_e = be32toh(((uint32_t *)mask)[4]);
const uint32_t target_a = be32toh(((uint32_t *)target)[0]) & mask_a;
const uint32_t target_b = be32toh(((uint32_t *)target)[1]) & mask_b;
const uint32_t target_c = be32toh(((uint32_t *)target)[2]) & mask_c;
const uint32_t target_d = be32toh(((uint32_t *)target)[3]) & mask_d;
const uint32_t target_e = be32toh(((uint32_t *)target)[4]) & mask_e;
// load text: copy the template and apply SHA-1 padding (0x80 then zeros)
uint8_t local[pow_sha1_block_length];
memcpy(local, buffer, pow_sha1_block_length);
local[size] = '\x80';
for (int i = size+1; i < pow_sha1_block_length - sizeof(uint64_t) / CHAR_BIT; ++ i) local[i] = '\0';
// message words 14/15 hold the 64-bit bit length; word 14 is always 0 here
static const uint32_t x14 = 0x00000000;
static_assert (x14 == 0, "unused (optimized out)");
const uint32_t x15 = size * 8;
// load indices and alphabet to modify the text
const int index0 = indices[0];
const int index1 = indices[1];
const int index2 = indices[2];
const int index3 = indices[3];
const int index4 = indices[4];
const int index5 = indices[5];
const int index6 = indices[6];
const int index7 = indices[7];
static_assert (pow_indices_length == 8, "");
repeat (i,pow_indices_length) {
if (indices[i] != -1) {
local[indices[i]] = 0;
}
}
// Pre-shifted, byte-swapped XOR deltas between consecutive alphabet
// windows, so the switch below can fold the index0 byte of all 8 lane
// candidates into one message word with a single XOR.
// NOTE(review): the malloc result is not checked before use.
uint32_t *padded_alphabet = malloc(alphabet_size * sizeof(uint32_t));
repeat (i,alphabet_size) {
uint32_t c = alphabet[i];
if (i - vector_width >= 0) c ^= alphabet[i - vector_width];
padded_alphabet[i] = be32toh(c << (index0 % 4 * CHAR_BIT));
}
// search: one nested loop per variable byte position, parallel over i1
bool found = false;
uint64_t cnt = 0;
#pragma omp parallel for shared(found) firstprivate(local) reduction(+:cnt)
repeat (i1, alphabet_size) { if (index1 != -1) local[index1] = alphabet[i1]; if (found or (index1 == -1 and i1 != 0)) continue;
repeat (i7, alphabet_size) { if (index7 != -1) local[index7] = alphabet[i7];
repeat (i6, alphabet_size) { if (index6 != -1) local[index6] = alphabet[i6];
repeat (i5, alphabet_size) { if (index5 != -1) local[index5] = alphabet[i5];
repeat (i4, alphabet_size) { if (index4 != -1) local[index4] = alphabet[i4];
repeat (i3, alphabet_size) { if (index3 != -1) local[index3] = alphabet[i3];
cnt += alphabet_size * (alphabet_size / vector_width * vector_width);
repeat (i2, alphabet_size) { if (index2 != -1) local[index2] = alphabet[i2];
// broadcast the current message words to all 8 lanes
__m256i y0 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[0 ]));
__m256i y1 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[1 ]));
__m256i y2 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[2 ]));
__m256i y3 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[3 ]));
__m256i y4 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[4 ]));
__m256i y5 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[5 ]));
__m256i y6 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[6 ]));
__m256i y7 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[7 ]));
__m256i y8 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[8 ]));
__m256i y9 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[9 ]));
__m256i y10 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[10]));
__m256i y11 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[11]));
__m256i y12 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[12]));
__m256i y13 = _mm256_set1_epi32(be32toh(((uint32_t *)local)[13]));
const __m256i y15 = _mm256_set1_epi32(x15);
for (int i0 = 0; i0 + vector_width - 1 < alphabet_size; i0 += vector_width) {
// set last byte: XOR the per-lane alphabet delta into the word
// that contains the index0 byte (each lane gets its own candidate)
switch (index0 / 4) {
case 0 : y0 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 1 : y1 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 2 : y2 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 3 : y3 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 4 : y4 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 5 : y5 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 6 : y6 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 7 : y7 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 8 : y8 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 9 : y9 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 10: y10 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 11: y11 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 12: y12 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
case 13: y13 ^= _mm256_loadu_si256((__m256i *)(padded_alphabet + i0)); break;
}
// initialize vector
__m256i a = _mm256_set1_epi32(sha1_a0);
__m256i b = _mm256_set1_epi32(sha1_b0);
__m256i c = _mm256_set1_epi32(sha1_c0);
__m256i d = _mm256_set1_epi32(sha1_d0);
__m256i e = _mm256_set1_epi32(sha1_e0);
// round [0, 16): words 14 (zero) and 15 (length) use the scalar round
sha1_roundv(a,&b,c,d,&e, y0 , sha1_k00, sha1_f00);
sha1_roundv(e,&a,b,c,&d, y1 , sha1_k00, sha1_f00);
sha1_roundv(d,&e,a,b,&c, y2 , sha1_k00, sha1_f00);
sha1_roundv(c,&d,e,a,&b, y3 , sha1_k00, sha1_f00);
sha1_roundv(b,&c,d,e,&a, y4 , sha1_k00, sha1_f00);
sha1_roundv(a,&b,c,d,&e, y5 , sha1_k00, sha1_f00);
sha1_roundv(e,&a,b,c,&d, y6 , sha1_k00, sha1_f00);
sha1_roundv(d,&e,a,b,&c, y7 , sha1_k00, sha1_f00);
sha1_roundv(c,&d,e,a,&b, y8 , sha1_k00, sha1_f00);
sha1_roundv(b,&c,d,e,&a, y9 , sha1_k00, sha1_f00);
sha1_roundv(a,&b,c,d,&e, y10, sha1_k00, sha1_f00);
sha1_roundv(e,&a,b,c,&d, y11, sha1_k00, sha1_f00);
sha1_roundv(d,&e,a,b,&c, y12, sha1_k00, sha1_f00);
sha1_roundv(c,&d,e,a,&b, y13, sha1_k00, sha1_f00);
sha1_round (b,&c,d,e,&a, 0, sha1_k00, sha1_f00);
sha1_round (a,&b,c,d,&e, x15, sha1_k00, sha1_f00);
// round [16, 20): message schedule (terms with y14 == 0 are omitted)
const __m256i y16 = mm256_rol_epi32( y13 ^ y8 ^ y2 ^ y0, 1 );
const __m256i y17 = mm256_rol_epi32( y9 ^ y3 ^ y1, 1 );
const __m256i y18 = mm256_rol_epi32( y15 ^ y10 ^ y4 ^ y2, 1 );
const __m256i y19 = mm256_rol_epi32( y16 ^ y11 ^ y5 ^ y3, 1 );
sha1_roundv(e,&a,b,c,&d, y16, sha1_k00, sha1_f00);
sha1_roundv(d,&e,a,b,&c, y17, sha1_k00, sha1_f00);
sha1_roundv(c,&d,e,a,&b, y18, sha1_k00, sha1_f00);
sha1_roundv(b,&c,d,e,&a, y19, sha1_k00, sha1_f00);
// round [20, 40)
const __m256i y20 = mm256_rol_epi32( y17 ^ y12 ^ y6 ^ y4 , 1 );
const __m256i y21 = mm256_rol_epi32( y18 ^ y13 ^ y7 ^ y5 , 1 );
const __m256i y22 = mm256_rol_epi32( y19 ^ y8 ^ y6 , 1 );
const __m256i y23 = mm256_rol_epi32( y20 ^ y15 ^ y9 ^ y7 , 1 );
const __m256i y24 = mm256_rol_epi32( y21 ^ y16 ^ y10 ^ y8 , 1 );
const __m256i y25 = mm256_rol_epi32( y22 ^ y17 ^ y11 ^ y9 , 1 );
const __m256i y26 = mm256_rol_epi32( y23 ^ y18 ^ y12 ^ y10, 1 );
const __m256i y27 = mm256_rol_epi32( y24 ^ y19 ^ y13 ^ y11, 1 );
const __m256i y28 = mm256_rol_epi32( y25 ^ y20 ^ y12, 1 );
const __m256i y29 = mm256_rol_epi32( y26 ^ y21 ^ y15 ^ y13, 1 );
const __m256i y30 = mm256_rol_epi32( y27 ^ y22 ^ y16 , 1 );
const __m256i y31 = mm256_rol_epi32( y28 ^ y23 ^ y17 ^ y15, 1 );
const __m256i y32 = mm256_rol_epi32( y29 ^ y24 ^ y18 ^ y16, 1 );
const __m256i y33 = mm256_rol_epi32( y30 ^ y25 ^ y19 ^ y17, 1 );
const __m256i y34 = mm256_rol_epi32( y31 ^ y26 ^ y20 ^ y18, 1 );
const __m256i y35 = mm256_rol_epi32( y32 ^ y27 ^ y21 ^ y19, 1 );
const __m256i y36 = mm256_rol_epi32( y33 ^ y28 ^ y22 ^ y20, 1 );
const __m256i y37 = mm256_rol_epi32( y34 ^ y29 ^ y23 ^ y21, 1 );
const __m256i y38 = mm256_rol_epi32( y35 ^ y30 ^ y24 ^ y22, 1 );
const __m256i y39 = mm256_rol_epi32( y36 ^ y31 ^ y25 ^ y23, 1 );
sha1_roundv(a,&b,c,d,&e, y20, sha1_k20, sha1_f20);
sha1_roundv(e,&a,b,c,&d, y21, sha1_k20, sha1_f20);
sha1_roundv(d,&e,a,b,&c, y22, sha1_k20, sha1_f20);
sha1_roundv(c,&d,e,a,&b, y23, sha1_k20, sha1_f20);
sha1_roundv(b,&c,d,e,&a, y24, sha1_k20, sha1_f20);
sha1_roundv(a,&b,c,d,&e, y25, sha1_k20, sha1_f20);
sha1_roundv(e,&a,b,c,&d, y26, sha1_k20, sha1_f20);
sha1_roundv(d,&e,a,b,&c, y27, sha1_k20, sha1_f20);
sha1_roundv(c,&d,e,a,&b, y28, sha1_k20, sha1_f20);
sha1_roundv(b,&c,d,e,&a, y29, sha1_k20, sha1_f20);
sha1_roundv(a,&b,c,d,&e, y30, sha1_k20, sha1_f20);
sha1_roundv(e,&a,b,c,&d, y31, sha1_k20, sha1_f20);
sha1_roundv(d,&e,a,b,&c, y32, sha1_k20, sha1_f20);
sha1_roundv(c,&d,e,a,&b, y33, sha1_k20, sha1_f20);
sha1_roundv(b,&c,d,e,&a, y34, sha1_k20, sha1_f20);
sha1_roundv(a,&b,c,d,&e, y35, sha1_k20, sha1_f20);
sha1_roundv(e,&a,b,c,&d, y36, sha1_k20, sha1_f20);
sha1_roundv(d,&e,a,b,&c, y37, sha1_k20, sha1_f20);
sha1_roundv(c,&d,e,a,&b, y38, sha1_k20, sha1_f20);
sha1_roundv(b,&c,d,e,&a, y39, sha1_k20, sha1_f20);
// round [40, 60)
const __m256i y40 = mm256_rol_epi32( y37 ^ y32 ^ y26 ^ y24, 1 );
const __m256i y41 = mm256_rol_epi32( y38 ^ y33 ^ y27 ^ y25, 1 );
const __m256i y42 = mm256_rol_epi32( y39 ^ y34 ^ y28 ^ y26, 1 );
const __m256i y43 = mm256_rol_epi32( y40 ^ y35 ^ y29 ^ y27, 1 );
const __m256i y44 = mm256_rol_epi32( y41 ^ y36 ^ y30 ^ y28, 1 );
const __m256i y45 = mm256_rol_epi32( y42 ^ y37 ^ y31 ^ y29, 1 );
const __m256i y46 = mm256_rol_epi32( y43 ^ y38 ^ y32 ^ y30, 1 );
const __m256i y47 = mm256_rol_epi32( y44 ^ y39 ^ y33 ^ y31, 1 );
const __m256i y48 = mm256_rol_epi32( y45 ^ y40 ^ y34 ^ y32, 1 );
const __m256i y49 = mm256_rol_epi32( y46 ^ y41 ^ y35 ^ y33, 1 );
const __m256i y50 = mm256_rol_epi32( y47 ^ y42 ^ y36 ^ y34, 1 );
const __m256i y51 = mm256_rol_epi32( y48 ^ y43 ^ y37 ^ y35, 1 );
const __m256i y52 = mm256_rol_epi32( y49 ^ y44 ^ y38 ^ y36, 1 );
const __m256i y53 = mm256_rol_epi32( y50 ^ y45 ^ y39 ^ y37, 1 );
const __m256i y54 = mm256_rol_epi32( y51 ^ y46 ^ y40 ^ y38, 1 );
const __m256i y55 = mm256_rol_epi32( y52 ^ y47 ^ y41 ^ y39, 1 );
const __m256i y56 = mm256_rol_epi32( y53 ^ y48 ^ y42 ^ y40, 1 );
const __m256i y57 = mm256_rol_epi32( y54 ^ y49 ^ y43 ^ y41, 1 );
const __m256i y58 = mm256_rol_epi32( y55 ^ y50 ^ y44 ^ y42, 1 );
const __m256i y59 = mm256_rol_epi32( y56 ^ y51 ^ y45 ^ y43, 1 );
sha1_roundv(a,&b,c,d,&e, y40, sha1_k40, sha1_f40);
sha1_roundv(e,&a,b,c,&d, y41, sha1_k40, sha1_f40);
sha1_roundv(d,&e,a,b,&c, y42, sha1_k40, sha1_f40);
sha1_roundv(c,&d,e,a,&b, y43, sha1_k40, sha1_f40);
sha1_roundv(b,&c,d,e,&a, y44, sha1_k40, sha1_f40);
sha1_roundv(a,&b,c,d,&e, y45, sha1_k40, sha1_f40);
sha1_roundv(e,&a,b,c,&d, y46, sha1_k40, sha1_f40);
sha1_roundv(d,&e,a,b,&c, y47, sha1_k40, sha1_f40);
sha1_roundv(c,&d,e,a,&b, y48, sha1_k40, sha1_f40);
sha1_roundv(b,&c,d,e,&a, y49, sha1_k40, sha1_f40);
sha1_roundv(a,&b,c,d,&e, y50, sha1_k40, sha1_f40);
sha1_roundv(e,&a,b,c,&d, y51, sha1_k40, sha1_f40);
sha1_roundv(d,&e,a,b,&c, y52, sha1_k40, sha1_f40);
sha1_roundv(c,&d,e,a,&b, y53, sha1_k40, sha1_f40);
sha1_roundv(b,&c,d,e,&a, y54, sha1_k40, sha1_f40);
sha1_roundv(a,&b,c,d,&e, y55, sha1_k40, sha1_f40);
sha1_roundv(e,&a,b,c,&d, y56, sha1_k40, sha1_f40);
sha1_roundv(d,&e,a,b,&c, y57, sha1_k40, sha1_f40);
sha1_roundv(c,&d,e,a,&b, y58, sha1_k40, sha1_f40);
sha1_roundv(b,&c,d,e,&a, y59, sha1_k40, sha1_f40);
// round [60, 80)
const __m256i y60 = mm256_rol_epi32( y57 ^ y52 ^ y46 ^ y44, 1 );
const __m256i y61 = mm256_rol_epi32( y58 ^ y53 ^ y47 ^ y45, 1 );
const __m256i y62 = mm256_rol_epi32( y59 ^ y54 ^ y48 ^ y46, 1 );
const __m256i y63 = mm256_rol_epi32( y60 ^ y55 ^ y49 ^ y47, 1 );
const __m256i y64 = mm256_rol_epi32( y61 ^ y56 ^ y50 ^ y48, 1 );
const __m256i y65 = mm256_rol_epi32( y62 ^ y57 ^ y51 ^ y49, 1 );
const __m256i y66 = mm256_rol_epi32( y63 ^ y58 ^ y52 ^ y50, 1 );
const __m256i y67 = mm256_rol_epi32( y64 ^ y59 ^ y53 ^ y51, 1 );
const __m256i y68 = mm256_rol_epi32( y65 ^ y60 ^ y54 ^ y52, 1 );
const __m256i y69 = mm256_rol_epi32( y66 ^ y61 ^ y55 ^ y53, 1 );
const __m256i y70 = mm256_rol_epi32( y67 ^ y62 ^ y56 ^ y54, 1 );
const __m256i y71 = mm256_rol_epi32( y68 ^ y63 ^ y57 ^ y55, 1 );
const __m256i y72 = mm256_rol_epi32( y69 ^ y64 ^ y58 ^ y56, 1 );
const __m256i y73 = mm256_rol_epi32( y70 ^ y65 ^ y59 ^ y57, 1 );
const __m256i y74 = mm256_rol_epi32( y71 ^ y66 ^ y60 ^ y58, 1 );
const __m256i y75 = mm256_rol_epi32( y72 ^ y67 ^ y61 ^ y59, 1 );
const __m256i y76 = mm256_rol_epi32( y73 ^ y68 ^ y62 ^ y60, 1 );
const __m256i y77 = mm256_rol_epi32( y74 ^ y69 ^ y63 ^ y61, 1 );
const __m256i y78 = mm256_rol_epi32( y75 ^ y70 ^ y64 ^ y62, 1 );
const __m256i y79 = mm256_rol_epi32( y76 ^ y71 ^ y65 ^ y63, 1 );
sha1_roundv(a,&b,c,d,&e, y60, sha1_k60, sha1_f60);
sha1_roundv(e,&a,b,c,&d, y61, sha1_k60, sha1_f60);
sha1_roundv(d,&e,a,b,&c, y62, sha1_k60, sha1_f60);
sha1_roundv(c,&d,e,a,&b, y63, sha1_k60, sha1_f60);
sha1_roundv(b,&c,d,e,&a, y64, sha1_k60, sha1_f60);
sha1_roundv(a,&b,c,d,&e, y65, sha1_k60, sha1_f60);
sha1_roundv(e,&a,b,c,&d, y66, sha1_k60, sha1_f60);
sha1_roundv(d,&e,a,b,&c, y67, sha1_k60, sha1_f60);
sha1_roundv(c,&d,e,a,&b, y68, sha1_k60, sha1_f60);
sha1_roundv(b,&c,d,e,&a, y69, sha1_k60, sha1_f60);
sha1_roundv(a,&b,c,d,&e, y70, sha1_k60, sha1_f60);
sha1_roundv(e,&a,b,c,&d, y71, sha1_k60, sha1_f60);
sha1_roundv(d,&e,a,b,&c, y72, sha1_k60, sha1_f60);
sha1_roundv(c,&d,e,a,&b, y73, sha1_k60, sha1_f60);
sha1_roundv(b,&c,d,e,&a, y74, sha1_k60, sha1_f60);
sha1_roundv(a,&b,c,d,&e, y75, sha1_k60, sha1_f60);
sha1_roundv(e,&a,b,c,&d, y76, sha1_k60, sha1_f60);
sha1_roundv(d,&e,a,b,&c, y77, sha1_k60, sha1_f60);
sha1_roundv(c,&d,e,a,&b, y78, sha1_k60, sha1_f60);
sha1_roundv(b,&c,d,e,&a, y79, sha1_k60, sha1_f60);
// compare result: add the IV back to `a' first; the remaining words are
// only folded in when the masked `a' word already matches
a = _mm256_add_epi32(a, _mm256_set1_epi32(sha1_a0));
const __m256i cmp_a = _mm256_cmpeq_epi32(a & _mm256_set1_epi32(mask_a), _mm256_set1_epi32(target_a));
if (unlikely(not _mm256_testz_si256(cmp_a, cmp_a))) {
b = _mm256_add_epi32(b, _mm256_set1_epi32(sha1_b0));
c = _mm256_add_epi32(c, _mm256_set1_epi32(sha1_c0));
d = _mm256_add_epi32(d, _mm256_set1_epi32(sha1_d0));
e = _mm256_add_epi32(e, _mm256_set1_epi32(sha1_e0));
const __m256i cmp_b = _mm256_cmpeq_epi32(b & _mm256_set1_epi32(mask_b), _mm256_set1_epi32(target_b));
const __m256i cmp_c = _mm256_cmpeq_epi32(c & _mm256_set1_epi32(mask_c), _mm256_set1_epi32(target_c));
const __m256i cmp_d = _mm256_cmpeq_epi32(d & _mm256_set1_epi32(mask_d), _mm256_set1_epi32(target_d));
const __m256i cmp_e = _mm256_cmpeq_epi32(e & _mm256_set1_epi32(mask_e), _mm256_set1_epi32(target_e));
const __m256i cmp_ad = cmp_a & cmp_d;
const __m256i cmp_bc = cmp_b & cmp_c;
const __m256i cmp_ade = cmp_ad & cmp_e;
if (unlikely(not _mm256_testz_si256(cmp_ade, cmp_bc))) {
uint32_t cmp[vector_width]; _mm256_storeu_si256((__m256i *)cmp, cmp_ade & cmp_bc);
repeat (i, vector_width) if (not found and cmp[i]) {
#pragma omp critical
{
if (not found) {
found = true;
// local's index0 byte was only updated in registers, so patch it
// from the winning lane; also restore the byte local[size]
// overwrote with the 0x80 padding marker
memcpy(buffer, local, pow_sha1_block_length);
buffer[index0] = alphabet[i0 + i];
buffer[size] = 0;
}
}
}
}
}
// break
} if (index2 == -1 or found) break;
} if (index3 == -1 or found) break;
} if (index4 == -1 or found) break;
} if (index5 == -1 or found) break;
} if (index6 == -1 or found) break;
} if (index7 == -1 or found) break;
}
}
// leave
free(padded_alphabet);
pow_sha1_count = cnt;
return found;
}
|
ompPrivate.c |
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * OpenMP demo: each thread gets a firstprivate copy of `i', writes its
 * rank into a shared array, and updates the shared `veryBad' counter
 * inside a critical section so the updates do not race (the final value
 * is the sum of all ranks, though the print order varies between runs).
 */
int main(int argc, char **argv){
int threadCount = 20;
omp_set_num_threads(threadCount);
// still serial here
int i = 6;
// one slot per thread; NOTE(review): calloc result is not checked
int *v = (int*) calloc(threadCount, sizeof(int));
int veryBad = 0;
// fork the program
#pragma omp parallel firstprivate(i)
{
// stuff in this scope gets executed by all OpenMP threads
int rank = omp_get_thread_num();
int size = omp_get_num_threads();
// each thread mutates only its private copy of i
i = i + rank;
// good parallel
#pragma omp critical
{
// force threads to take turn based on rank
v[rank] = rank;
// printf("v[%d]=%d\n", rank, v[rank]);
veryBad += rank;
printf("veryBad = %d (from rank %d)\n",
veryBad, rank);
printf("i=%d\n", i);
}
}
printf("veryBad = %d\n", veryBad);
// exit() terminates the process; the return below is never reached
exit(0);
return 0;
}
|
dot_product.c | #include <mpi.h>
#ifdef _CIVL
#include <stdlib.h>
#include <civlc.cvh>
#endif
#define HYPRE_BigInt int
// seq_mv.h :
typedef struct
{
double *data;
int size;
/* Does the Vector create/destroy `data'? */
int owns_data;
/* For multivectors...*/
int num_vectors; /* the above "size" is size of one vector */
int multivec_storage_method;
/* ...if 0, store colwise v0[0], v0[1], ..., v1[0], v1[1], ... v2[0]... */
/* ...if 1, store rowwise v0[0], v1[0], ..., v0[1], v1[1], ... */
/* With colwise storage, vj[i] = data[ j*size + i]
With rowwise storage, vj[i] = data[ j + num_vectors*i] */
int vecstride, idxstride;
/* ... so vj[i] = data[ j*vecstride + i*idxstride ] regardless of row_storage.*/
} hypre_Vector;
#define hypre_VectorData(vector) ((vector) -> data)
#define hypre_VectorSize(vector) ((vector) -> size)
#define hypre_VectorOwnsData(vector) ((vector) -> owns_data)
#define hypre_VectorNumVectors(vector) ((vector) -> num_vectors)
#define hypre_VectorMultiVecStorageMethod(vector) ((vector) -> multivec_storage_method)
#define hypre_VectorVectorStride(vector) ((vector) -> vecstride )
#define hypre_VectorIndexStride(vector) ((vector) -> idxstride )
// vector.c :
// Local (sequential) inner product of two hypre_Vector objects.
// Multivectors are handled by folding num_vectors into the element count,
// which relies on the component vectors being stored contiguously.
double hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
double *xd = hypre_VectorData(x);
double *yd = hypre_VectorData(y);
int n = hypre_VectorSize(x) * hypre_VectorNumVectors(x);
double result = 0.0;
int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:result) schedule(static)
#endif
for (i = 0; i < n; i++)
result += yd[i] * xd[i];
return result;
}
// parcsr_mv.h:
typedef struct
{
int length;
HYPRE_BigInt row_start;
HYPRE_BigInt row_end;
int storage_length;
int *proc_list;
HYPRE_BigInt *row_start_list;
HYPRE_BigInt *row_end_list;
int *sort_index;
} hypre_IJAssumedPart;
typedef struct
{
MPI_Comm comm;
HYPRE_BigInt global_size;
HYPRE_BigInt first_index;
HYPRE_BigInt last_index;
HYPRE_BigInt *partitioning;
hypre_Vector *local_vector;
int owns_data;
int owns_partitioning;
hypre_IJAssumedPart *assumed_partition;
} hypre_ParVector;
#define hypre_ParVectorComm(vector) ((vector) -> comm)
#define hypre_ParVectorGlobalSize(vector) ((vector) -> global_size)
#define hypre_ParVectorFirstIndex(vector) ((vector) -> first_index)
#define hypre_ParVectorLastIndex(vector) ((vector) -> last_index)
#define hypre_ParVectorPartitioning(vector) ((vector) -> partitioning)
#define hypre_ParVectorLocalVector(vector) ((vector) -> local_vector)
#define hypre_ParVectorOwnsData(vector) ((vector) -> owns_data)
#define hypre_ParVectorOwnsPartitioning(vector) ((vector) -> owns_partitioning)
#define hypre_ParVectorNumVectors(vector)\
(hypre_VectorNumVectors( hypre_ParVectorLocalVector(vector) ))
#define hypre_ParVectorAssumedPartition(vector) ((vector) -> assumed_partition)
// par_vector.c :
// Global inner product over the communicator: every rank computes the dot
// product of its local pieces, then the partial sums are combined with
// MPI_Allreduce so all ranks receive the full result.
double
hypre_ParVectorInnerProd( hypre_ParVector *x,
hypre_ParVector *y )
{
double local_result = hypre_SeqVectorInnerProd(hypre_ParVectorLocalVector(x),
hypre_ParVectorLocalVector(y));
double result = 0.0;
MPI_Allreduce(&local_result, &result, 1, MPI_DOUBLE, MPI_SUM,
hypre_ParVectorComm(x));
return result;
}
/* Stripped down driver for AMG2013 parallel inner product routine. */
#define XVET x.local_vector
#define YVET y.local_vector
#ifdef _CIVL
$input int VECTOR_LENGTH;
$assume(0 <= VECTOR_LENGTH && VECTOR_LENGTH < 10);
#endif
// Driver: under CIVL the local vectors are allocated and sized symbolically;
// without _CIVL, x and y stay uninitialized (the driver is only meaningful
// when verified with CIVL).
int main() {
hypre_ParVector x, y;
int nprocs;
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
#ifdef _CIVL
x.comm = MPI_COMM_WORLD;
y.comm = MPI_COMM_WORLD;
XVET = (hypre_Vector *)malloc(sizeof(hypre_Vector));
YVET = (hypre_Vector *)malloc(sizeof(hypre_Vector));
XVET->data = (double *)malloc(sizeof(double) * VECTOR_LENGTH * nprocs);
YVET->data = (double *)malloc(sizeof(double) * VECTOR_LENGTH * nprocs);
XVET->size = VECTOR_LENGTH;
YVET->size = VECTOR_LENGTH;
XVET->num_vectors = nprocs;
YVET->num_vectors = nprocs;
#endif
double result = hypre_ParVectorInnerProd(&x, &y);
MPI_Finalize();
#ifdef _CIVL
// These buffers exist only in the _CIVL build; freeing them
// unconditionally (as before) dereferenced uninitialized pointers
// in a non-CIVL build, which is undefined behavior.
free(XVET->data);
free(YVET->data);
free(XVET);
free(YVET);
#endif
#ifdef DEBUG
#include <stdio.h>
printf("result = %f\n", result);
#endif
return result != 0;
}
|
kmp_sch_simd_runtime_static.c | // RUN: %libomp-compile && %libomp-run
// RUN: %libomp-run 1 && %libomp-run 2
// The test checks schedule(simd:runtime)
// in combination with OMP_SCHEDULE=static[,chunk]
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#define seten(a,b,c) _putenv_s((a),(b))
#else
#include <unistd.h>
#define delay() usleep(10);
#define seten(a,b,c) setenv((a),(b),(c))
#endif
#define SIMD_LEN 4
int err = 0;
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL.
enum sched {
kmp_sch_static_balanced_chunked = 45,
kmp_sch_guided_simd = 46,
kmp_sch_runtime_simd = 47,
};
typedef unsigned u32;
typedef long long i64;
typedef unsigned long long u64;
typedef struct {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
#ifdef __cplusplus
extern "C" {
#endif
int __kmpc_global_thread_num(id*);
void __kmpc_barrier(id*, int gtid);
void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int);
void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64);
int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*);
int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*);
#ifdef __cplusplus
} // extern "C"
#endif
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
// ---------------------------------------------------------------------------
// One thread's share of the schedule(simd:runtime) test: dispatches chunks
// through the __kmpc_dispatch_* RTL entry points and validates each returned
// chunk (bounds, stride, SIMD-multiple size, last-chunk rules) against the
// loop parameters, bumping the global `err` counter on any violation.
void
run_loop(
int loop_lb, // Loop lower bound.
int loop_ub, // Loop upper bound.
int loop_st, // Loop stride.
int lchunk
) {
static int volatile loop_sync = 0; // NOTE(review): unused in this test -- confirm before removing
int lb; // Chunk lower bound.
int ub; // Chunk upper bound.
int st; // Chunk stride.
int rc; // NOTE(review): unused
int nthreads = omp_get_num_threads();
int tid = omp_get_thread_num();
int gtid = __kmpc_global_thread_num(&loc);
int last;
int tc = (loop_ub - loop_lb) / loop_st + 1; // total trip count (NOTE(review): unused)
int ch;
int no_chunk = 0; // set when no chunk size was specified (defaults to 1)
if (lchunk == 0) {
no_chunk = 1;
lchunk = 1;
}
ch = lchunk * SIMD_LEN; // expected chunk size in iterations (SIMD-width multiple)
#if _DEBUG > 1
printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n",
gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk);
#endif
// Don't test degenerate cases that should have been discovered by codegen.
if (loop_st == 0)
return;
if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
return;
__kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd,
loop_lb, loop_ub, loop_st, SIMD_LEN);
{
// Let the master thread handle the chunks alone.
int chunk; // No of current chunk.
int last_ub; // Upper bound of the last processed chunk.
u64 cur; // Number of interations in current chunk.
u64 max; // Max allowed iterations for current chunk.
int undersized = 0;
last_ub = loop_ub;
chunk = 0;
max = (loop_ub - loop_lb) / loop_st + 1;
// The first chunk can consume all iterations.
while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
++ chunk;
#if _DEBUG
printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n",
tid, chunk, (int)lb, (int)ub, (int)(ub-lb+1));
#endif
// Check if previous chunk (it is not the final chunk) is undersized.
if (undersized)
printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err);
// Chunk bounds must stay inside the loop bounds, in stride direction.
if (loop_st > 0) {
if (!(ub <= loop_ub))
printf("Error with ub %d, %d, ch %d, err %d\n",
(int)ub, (int)loop_ub, chunk, ++err);
if (!(lb <= ub))
printf("Error with bounds %d, %d, %d, err %d\n",
(int)lb, (int)ub, chunk, ++err);
} else {
if (!(ub >= loop_ub))
printf("Error with ub %d, %d, %d, err %d\n",
(int)ub, (int)loop_ub, chunk, ++err);
if (!(lb >= ub))
printf("Error with bounds %d, %d, %d, err %d\n",
(int)lb, (int)ub, chunk, ++err);
}; // if
// Stride should not change.
if (!(st == loop_st))
printf("Error with st %d, %d, ch %d, err %d\n",
(int)st, (int)loop_st, chunk, ++err);
cur = ( ub - lb ) / loop_st + 1;
// Guided scheduling uses FP computations, so current chunk may
// be a bit bigger (+1) than allowed maximum.
if (!( cur <= max + 1))
printf("Error with iter %llu, %llu, err %d\n", cur, max, ++err);
// Update maximum for the next chunk.
if (last) {
// Only the final chunk may be smaller than ch (and only once).
if (!no_chunk && cur > ch && nthreads > 1)
printf("Error: too big last chunk %d (%d), tid %d, err %d\n",
(int)cur, ch, tid, ++err);
} else {
// Every non-final chunk must be an exact multiple of ch.
if (cur % ch)
printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n",
chunk, (int)cur, ch, tid, ++err);
}
if (cur < max)
max = cur;
last_ub = ub;
undersized = (cur < ch);
#if _DEBUG > 1
if (last)
printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n",
undersized,cur,ch,tid,ub,lb,loop_st);
#endif
} // while
// Must have the right last iteration index.
if (loop_st > 0) {
if (!(last_ub <= loop_ub))
printf("Error with last1 %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_ub, chunk, ++err);
if (last && !(last_ub + loop_st > loop_ub))
printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
} else {
if (!(last_ub >= loop_ub))
printf("Error with last1 %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_ub, chunk, ++err);
if (last && !(last_ub + loop_st < loop_ub))
printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
} // if
}
__kmpc_barrier(&loc, gtid);
} // run_loop
// Sets OMP_SCHEDULE to "static" (optionally with a chunk from argv[1]),
// runs the dispatch-validation loop in parallel, and reports pass/fail
// based on the global `err` counter.
int main(int argc, char *argv[])
{
int chunk = 0;
if (argc > 1) {
// 8 = strlen("static,") + 1 for the terminating NUL
char *buf = malloc(8 + strlen(argv[1]));
if (buf == NULL) { // malloc was previously unchecked
printf("malloc failed\n");
return 1;
}
// expect chunk size as a parameter
chunk = atoi(argv[1]);
strcpy(buf,"static,");
strcat(buf,argv[1]);
seten("OMP_SCHEDULE",buf,1);
printf("Testing schedule(simd:%s)\n", buf);
free(buf);
} else {
seten("OMP_SCHEDULE","static",1);
printf("Testing schedule(simd:static)\n");
}
#pragma omp parallel// num_threads(num_th)
run_loop(0, 26, 1, chunk);
if (err) {
printf("failed, err = %d\n", err);
return 1;
} else {
printf("passed\n");
return 0;
}
}
|
mpi_comm.c | #include "../include/mpi_comm.h"
// compute start and end of each small block, [start, end)
// First row index owned by `rank` when `size` rows are split over
// `prosCount` processes; the first size%prosCount ranks get one extra row.
int compute_start(int rank, int prosCount, int size) {
int margin = size % prosCount;
int big = size / prosCount + 1; // rows per "large" (extra-row) block
int over = rank - margin; // how far past the large blocks we are
return big * rank - (over > 0 ? over : 0);
}
// One-past-last row index owned by `rank`: its start plus its block length
// (size/prosCount rows, +1 for the first size%prosCount ranks).
int compute_end(int rank, int prosCount, int size) {
int extra = (rank < size % prosCount) ? 1 : 0;
return compute_start(rank, prosCount, size) + size / prosCount + extra;
}
// divide the original matrix by rows, [start, end)
// to construct a MPI datatype for communication
// Builds (and commits) an MPI struct datatype describing rows [start, end)
// in place, using byte displacements relative to the first row's data.
// Returns 1 on success, 0 if the row range is invalid.
// NOTE(review): malloc results are unchecked -- assumes these small
// bookkeeping allocations cannot fail.
int pack_subMatrix(MPI_Datatype *matType, struct Matrix *matp, int start, int end) {
int m = matp->m;
// reject empty or out-of-range row spans
if(start < 0 || start >= m || end <= 0 || end > m || end <= start)
return 0;
int count = end - start;
int *blockLength = (int*)malloc(sizeof(int) * count);
MPI_Aint *displacement = (MPI_Aint*)malloc(sizeof(MPI_Aint) * count);
MPI_Datatype *types = (MPI_Datatype*)malloc(sizeof(MPI_Datatype) * count);
MPI_Aint baseAddr, nowAddr;
// j == i - start on every iteration (updated right after i++)
for(int i = start, j = 0;i < end;i++, j=i-start) {
if(!j)
MPI_Get_address((const void*)matp->mat[i]->vec, &baseAddr);
MPI_Get_address((const void*)matp->mat[i]->vec, &nowAddr);
blockLength[j] = matp->n;
displacement[j] = nowAddr - baseAddr;
types[j] = MPI_FLOAT;
}
MPI_Type_create_struct(count, blockLength, displacement, types, matType);
MPI_Type_commit(matType);
// the committed datatype keeps its own copy of this metadata
free(blockLength);
free(displacement);
free(types);
return 1;
}
// unpack the MPI receive buffer and put floats back to the matrix
// Copies `len` floats per row from the flat receive buffer into rows
// [start, end) of the matrix, in parallel.  Returns 1 for success (kept
// `int` to match pack_subMatrix); previously the function fell off the end
// without returning a value, which is undefined behavior if a caller ever
// uses the result.
int unpack_subMatrix(void *recvBuffer, struct Matrix *matp, int start, int end, int len) {
int i, threadCount = omp_get_num_procs();
#pragma omp parallel for num_threads(threadCount) default(none) shared(start, end, len, matp, recvBuffer) private(i)
for(i = start;i < end;i++) {
int j = i - start; // row offset inside the flat buffer
matp->mat[i]->length = len;
for(int k = 0;k < len;k++)
matp->mat[i]->vec[k] = ((float*)recvBuffer)[len*j + k];
}
return 1;
}
// send (part of) the matrix to a specific process
// Packs rows [row_start, row_end) into a one-off MPI struct datatype,
// sends a single instance of it (message tag = sender's rank), then frees
// the datatype.  The receiver side posts a plain MPI_FLOAT receive of
// matching total size (see recv_mat_by_row).
void send_mat_by_row(int row_start, int row_end, struct Matrix *matp, int sourceRank, int tarRank) {
MPI_Datatype matType;
pack_subMatrix(&matType, matp, row_start, row_end);
// Be careful, only send 1 user-defined datatype to workers
MPI_Send(matp->mat[row_start]->vec, 1, matType, tarRank, sourceRank, MPI_COMM_WORLD);
MPI_Type_free(&matType);
}
// receive (part of) the matrix from the master process
// The sender transmits one struct datatype; it arrives here as a flat
// array of floats (tag = sender's rank), which unpack_subMatrix copies
// into rows [row_start, row_end).
// NOTE(review): malloc result is unchecked.
void recv_mat_by_row(int row_start, int row_end, int row_len, struct Matrix *matp, int sourceRank) {
int bufSize = (row_end - row_start) * row_len;
float *recvBuffer = (float*)malloc(sizeof(float) * bufSize);
MPI_Status recvResult;
MPI_Recv(recvBuffer, bufSize, MPI_FLOAT, sourceRank, sourceRank, MPI_COMM_WORLD, &recvResult);
unpack_subMatrix(recvBuffer, matp, row_start, row_end, row_len);
free(recvBuffer);
}
// broadcast (part of) the matrix to all nodes in the communicator
// receivers should initialize matp first.
// Root broadcasts rows [row_start, row_end) via a struct datatype; the
// other ranks receive the same bytes as a flat float buffer and unpack
// them into rows [0, row_end-row_start) of their local matrix.
void broadcast_mat_by_row(int row_start, int row_end, struct Matrix *matp, int sourceRank, int rank) {
if(rank == sourceRank) {
MPI_Datatype matType;
pack_subMatrix(&matType, matp, row_start, row_end);
MPI_Bcast(matp->mat[row_start]->vec, 1, matType, sourceRank, MPI_COMM_WORLD);
// free the committed datatype (was leaked; send_mat_by_row frees its type)
MPI_Type_free(&matType);
} else {
int bufSize = (row_end - row_start) * matp->n;
float *recvBuffer = (float*)malloc(sizeof(float) * bufSize);
MPI_Bcast(recvBuffer, bufSize, MPI_FLOAT, sourceRank, MPI_COMM_WORLD);
unpack_subMatrix(recvBuffer, matp, 0, row_end-row_start, matp->n);
// release the staging buffer (was leaked on every receiving rank)
free(recvBuffer);
}
}
// scatter (part of) the matrix to nodes in the communicator
// receivers should initialize matp first.
// The root flattens its matrix row-major into sendBuffer and computes
// per-rank element counts/offsets from the row partition; MPI_Scatterv
// then delivers each rank its slice.  sendCounts/displs are only read by
// MPI on the root rank, and recvCount is an upper bound on the elements
// a rank may receive.
// NOTE(review): non-root ranks unpack rows [0, matp->m) -- this assumes
// matp on workers is sized to the local partition; confirm with callers.
void scatter_mat_by_row(struct Matrix *matp, int sourceRank, int rank, int prosCount) {
int recvCount = matp->m * matp->n,
*sendCounts = (int*)malloc(sizeof(int) * prosCount),
*displs = (int*)malloc(sizeof(int) * prosCount);
float *sendBuffer = (float*)malloc(sizeof(float) * matp->m * matp->n),
*recvBuffer = (float*)malloc(sizeof(float) * matp->m * matp->n);
if(rank == sourceRank) {
// gather data from matrix.
int i, threadCount = omp_get_num_procs();
#pragma omp parallel for num_threads(threadCount) default(none) shared(sendBuffer, matp) private(i)
for(i = 0;i < matp->m;i++)
for(int j = 0;j < matp->n;j++)
sendBuffer[matp->n*i+j] = matp->mat[i]->vec[j];
// translate the per-rank row partition into element counts & offsets
for(int i = 0;i < prosCount;i++) {
int row_start = compute_start(i, prosCount, matp->m),
row_end = compute_end(i, prosCount, matp->m);
sendCounts[i] = (row_end - row_start) * matp->n;
displs[i] = row_start * matp->n;
}
}
MPI_Scatterv(sendBuffer, sendCounts, displs, MPI_FLOAT, recvBuffer, recvCount, MPI_FLOAT, sourceRank, MPI_COMM_WORLD);
// the root already holds its own rows, so only non-root ranks unpack
if(rank != sourceRank) {
unpack_subMatrix(recvBuffer, matp, 0, matp->m, matp->n);
}
free(sendBuffer), free(recvBuffer);
free(sendCounts), free(displs);
}
// gather matrix from nodes in the communicator
// receiver should initalize matp first.
// Workers flatten their whole local matrix into sendBuffer; the root
// derives per-rank counts/offsets from the row partition (its own entry
// zeroed) and collects everything with MPI_Gatherv.
// NOTE(review): on the root, sendCount remains matp->m*matp->n even though
// recvCounts[root] is 0 and the root's sendBuffer is never filled.
// MPI_Gatherv requires each rank's send amount (root included) to match
// its recvCounts entry, so this looks inconsistent -- verify before
// relying on this path.
void gather_mat_by_row(struct Matrix *matp, int tarRank, int rank, int prosCount) {
int sendCount = matp->m * matp->n,
*recvCounts = (int*)malloc(sizeof(int) * prosCount),
*displs = (int*)malloc(sizeof(int) * prosCount);
float *sendBuffer = (float*)malloc(sizeof(float) * matp->m * matp->n),
*recvBuffer = (float*)malloc(sizeof(float) * matp->m * matp->n);
if(rank != tarRank) {
int i, threadCount = omp_get_num_procs();
#pragma omp parallel for num_threads(threadCount) default(none) shared(sendBuffer, matp) private(i)
for(i = 0;i < matp->m;i++)
for(int j = 0;j < matp->n;j++)
sendBuffer[matp->n*i+j] = matp->mat[i]->vec[j];
} else {
for(int i = 0;i < prosCount;i++) {
// skip the receiver itself
if(i == rank) {
recvCounts[i] = displs[i] = 0;
continue;
}
int row_start = compute_start(i, prosCount, matp->m),
row_end = compute_end(i, prosCount, matp->m);
recvCounts[i] = (row_end - row_start) * matp->n;
displs[i] = row_start * matp->n;
}
}
MPI_Gatherv(sendBuffer, sendCount, MPI_FLOAT, recvBuffer, recvCounts, displs, MPI_FLOAT, tarRank, MPI_COMM_WORLD);
if(rank == tarRank) {
// the receiver has to fill its own part into the matrix
// NOTE(review): this unpacks rows [0, m), but the root's own region of
// recvBuffer was never received/written -- confirm intended behavior.
unpack_subMatrix(recvBuffer, matp, 0, matp->m, matp->n);
}
free(sendBuffer), free(recvBuffer);
free(recvCounts), free(displs);
}
solver_benchmark.c | //
////
////////
////////////////
////////////////////////////////
////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// COMPANION SOFTWARE TO: Assessment of localized and randomized algorithms for electronic structure
// USAGE: <executable> <xyz structure file> <chemical potential> <temperature> <solver> <solver parameters ...>
// C99 syntax, OpenMP-based shared memory parallelism, designed to run on a single multi-core supercomputer node
// MPI parallelism is included for PEXSI functionality (solver=2) & all other solvers should use 1 MPI process
// MPI is being used w/ an underlying shared-memory (e.g. single node) system in mind (i.e. nonuniform memory distribution)
// UNITS: energies/temperatures & distances are in electronvolts/Angstroms for input/output & Rydbergs/Bohr internally
// stress tensor is in gigapascals (GPa)
// Available solvers:
// # | F-D approx. | trace approx. | O(N^p) | solver parameters
//----+-------------+-------------------+--------+-------------------
// 0 | none | none | 1 | none [pre & post processing only]
// 1 | exact | exact | 3 | none
// 2 | rational | exact (PEXSI) | 2 (3D) | <#/2 of poles>
// 3 | polynomial | exact (iterative) | 2 | <# of Cheby.> <res. tol.>
// 4 | rational | exact (iterative) | 2 | <#/2 of poles> <res. tol.>
// 5 | polynomial | local | 1 | <# of Cheby.> <res. tol.> <loc. rad.>
// 6 | rational | local | 1 | <#/2 of poles> <res. tol.> <loc. rad.>
// 7 | polynomial | random | 1 | <# of Cheby.> <res. tol.> <loc. rad.> <seed> <# of samples>
// 8 | rational | random | 1 | <#/2 of poles> <res. tol.> <loc. rad.> <seed> <# of samples>
// 9 | rational | local (infinite) | 0 | <#/2 of poles> <res. tol.> <loc. rad.>
// 10 | exact | k-grid (infinite) | 0 | <# of k-grid pts. per dimension> <loc. rad.>
// Available testers:
// -1 | none | local (infinite) | 0 | <res. tol.> <min. rad.> <max. rad.> <# rad.> [precondition test]
// INPUT KEY:
// <#/2 of poles> : number of complex-conjugate pole pairs in the rational approximation of the Fermi-Dirac function
// <# of Cheby.> : number of Chebyshev polynomials used to approximate the Fermi-Dirac function
// <res. tol.> : residual 2-norm stopping criterion for iterative linear solvers (conjugate gradient & conjugate residual)
// <loc. rad.> : localization radius that defines the sparsity pattern of local Hamiltonians (solver = 5,6)
// & the coloring scheme for uncorrelated complex rotors (solver = 7,8)
// <seed> : integer seed for the pseudo-random number generator
// <# of samples> : the number of samples drawn from the colored complex rotor multi-vector distribution
// <# of k-grid pts. per dimension> : the number of points assigned to the k-point grid per reciprocal-space dimension (3)
// <min. rad.> <max. rad.> <# rad.> : minimum/maximum/number-of radius values for a grid of preconditioner localization radii
// Structure file format (*.xyz) for monoatomic copper clusters:
// <# of atoms>
//
// Cu <x coordinate of atom #1> <y coordinate of atom #1> <z coordinate of atom #1>
// ...
// Cu <x coordinate of atom #N> <y coordinate of atom #N> <z coordinate of atom #N>
// NOTE: for solver = 9, the positions of the 2nd, 3rd, & 4th atoms relative to the 1st define the crystal lattice vectors
// OUTPUT:
// Total number of electrons, total energy, & atomic forces to standard output
// Memory & time usage to standard output (the only output for solver = 0 & -1)
// Density & response matrix elements in the Hamiltonian sparsity pattern to "debug.out"
// F-norm for off-diagonal blocks of density & response matrices in "decay.out" (solver = 9 & 10 only)
// Fermi-smeared electronic density-of-states to "dos.out" (solver = 10 only)
// RECOMMENDED OPENMP SETTINGS:
// solver = 1 : OMP_NUM_THREADS = 1 & MKL_NUM_THREADS = # of cores , we only utilize threading through LAPACK & BLAS calls
// solver = 2 : OMP_NUM_THREADS = MKL_NUM_THREADS = 1 , MPI-based parallelism only without any threading
// otherwise : OMP_NUM_THREADS = # of cores & MKL_NUM_THREADS = 1 , threading in code & BLAS calls only for small matrix blocks
// SOFTWARE ORGANIZATION:
// 1. Fermi-Dirac approximation - fit polynomials & rational functions
// 2. NRL tight-binding model - matrix elements & their derivatives
// 3. Atomic partitioning - sets up neighbors lists for atoms
// 4. Block vector & matrix operations - native linear algebra operations in this software
// 5. Matrix construction & conversion - application-specific construction & conversion to other formats
// 6. Pseudo-random number generation - a standard PRNG generator that is better than C rand()
// 7. Iterative solvers - application-specific implementations of CG & MINRES & Chebyshev recursion
// 8. Solver mains - a main specific to each solver
// 9. Main - global control flow
// EXTERNAL LIBRARIES:
// - MKL (for BLAS, LAPACK, & FFTW3)
// - PEXSI 1.0
// - symPACK post-1.1 [development version that is adapted for PEXSI compatibility]
// - PT-Scotch 6.0.0
// - SuperLU_DIST 5.2.1
// - parMETIS 4.0.3
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <omp.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "mkl.h"
#include "fftw3.h"
#include "c_pexsi_interface.h"
#define A0 0.52917721067 // Bohr radius in Angstroms
#define E0 13.60569253 // Rydberg energy in eV
#define P0 14710.5071 // Ry/Bohr^3 in GPa
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
#define MAX(x,y) (((x) > (y)) ? (x) : (y))
#ifndef M_PI
#define M_PI 3.14159265358979323846264338327950288
#endif
// MKL_INT is used as a matrix/vector index for compatibility with both 32-bit & 64-bit versions of MKL
// If MKL is not being used, define MKL_INT locally as the integer type used by BLAS & LAPACK (usually 'int')
//#define MKL_INT int
// Ditto for MKL_Complex16, but changes must also be made to infinite_reciprocal_solver if this is redefined
//#define MKL_Complex16 double complex
// Convenient hard-coded path (relative or absolute) to the rational approximation table
#define RATIONAL_TABLE_PATH "../src/table.txt" // relative path used for all benchmark calculations
// All dense matrices & matrix blocks are stored in Fortran-style column-major order
// All vectors have block structure and are stored as a sequence of memory-contiguous dense blocks (in single-index arrays)
// The block sizes are set by the natural block size for atomic partitioning in our tight-binding model (9),
// which is not optimal for performance. We choose simplicity over performance here.
#define NBLOCK_MAX 9 // hard-coded maximum block size
// Compressed-column sparsity pattern
struct pattern
{
int ncol; // number of columns
int nrow; // number of rows
int *col; // index of the first element of each column & col[ncol] is the number of nonzero elements [ncol+1]
int *row; // row of each nonzero matrix element [col[ncol]]
};
// Wrap up memory deallocation for pattern structure
// Releases the two heap arrays owned by a compressed-column sparsity
// pattern; the struct itself is not freed (it may live on the stack).
void free_pattern(struct pattern* mat) // sparsity pattern to be set free [1]
{
free(mat->row);
free(mat->col);
}
//==============================//
// 1. FERMI-DIRAC APPROXIMATION //
//==============================//
// RETURN: value of the Fermi-Dirac distribution at x
double fermi(double x) // argument of the function
{ return 1.0/(1.0 + exp(x)); }
// RETURN: derivative of the Fermi-Dirac distribution at x
double dfermi_dx(double x) // argument of the function
{ return -0.5/(1.0 + cosh(x)); }
// RETURN: value of the Chebyshev expansion sum_i coeff[i]*T_i(x),
// evaluated with the three-term recurrence T_{i+1} = 2x*T_i - T_{i-1}
double chebyshev(double x, // evaluation point
int n, // number of Chebyshev polynomials
double *coeff) // coefficients of the Chebyshev polynomials [n]
{
double sum = 0.0;
double Tm1 = 1.0; // T_0
double T0 = x; // T_1
if(n > 0) { sum += coeff[0]*Tm1; }
if(n > 1) { sum += coeff[1]*T0; }
for(int i=2 ; i<n ; i++)
{
double Tp1 = 2.0*x*T0 - Tm1;
sum += coeff[i]*Tp1;
Tm1 = T0;
T0 = Tp1;
}
return sum;
}
// Chebyshev polynomial approximation of the Fermi-Dirac function
#define CHEBYSHEV_DX 0.1 // grid spacing needed for accurate integrals of the Fermi-Dirac function
#define GOLDEN_RATIO 1.61803398875
#define EXTREMUM_TOLERANCE 1e-12
// Fits an n-term Chebyshev expansion of fermi() on the shifted/scaled
// energy window via a type-II DCT (FFTW REDFT10), reconstructs the
// truncated expansion with the inverse DCT to get pointwise residuals,
// and refines the largest residual with a golden-section search.
// RETURN: maximum approximation error
double polynomial_approximation(int n, // number of Chebyshev polynomials
double min_energy, // minimum orbital energy of the system
double max_energy, // maximum orbital energy of the system
double potential, // chemical potential of the system
double temperature, // temperature of the system
double *coeff) // coefficients for Chebyshev polynomials [n]
{
// set shifted & scaled domain
double xmin = (min_energy - potential)/temperature;
double xmax = (max_energy - potential)/temperature;
// set quadrature & integrand values; at least 2n points so the n-term
// truncation is well resolved
int npt = MAX((int)ceil((xmax-xmin)/CHEBYSHEV_DX),2*n);
double *pt = (double*)malloc(sizeof(double)*npt);
double *val = (double*)malloc(sizeof(double)*npt);
for(int i=0 ; i<npt ; i++)
{
pt[i] = 0.5*(xmin+xmax) + 0.5*(xmin-xmax)*cos(M_PI*((double)i+0.5)/(double)npt);
val[npt-i-1] = fermi(pt[i])/(double)(2.0*npt); // reversed order & rescaling for FFTW input
}
// transform & truncate Chebyshev expansion
double *coeff_big = (double*)malloc(sizeof(double)*npt);
fftw_plan p;
p = fftw_plan_r2r_1d(npt,val,coeff_big,FFTW_REDFT10,FFTW_ESTIMATE);
fftw_execute(p);
fftw_destroy_plan(p);
for(int i=0 ; i<n ; i++) { coeff[i] = 2.0*coeff_big[i]; }
for(int i=n ; i<npt ; i++) { coeff_big[i] = 0.0; } // zero the truncated tail
coeff[0] *= 0.5; // DCT convention: the constant term carries half weight
// inverse transform to generate residual grid
p = fftw_plan_r2r_1d(npt,coeff_big,val,FFTW_REDFT01,FFTW_ESTIMATE);
fftw_execute(p);
fftw_destroy_plan(p);
// find grid point with largest residual error
int ierror = -1;
double error = 0.0;
for(int i=0 ; i<npt ; i++)
{
if(fabs(val[npt-i-1] - fermi(pt[i])) > error)
{
error = fabs(val[npt-i-1] - fermi(pt[i]));
ierror = i;
}
}
// refine global residual maximum with Golden section search
// (bracket the maximum between the neighboring Chebyshev grid points)
double xmin0 = -cos(M_PI*((double)MAX(0,ierror-1)+0.5)/(double)npt);
double xmax0 = -cos(M_PI*((double)MIN(npt-1,ierror+1)+0.5)/(double)npt);
double xmin0_new = xmin0 + (xmax0 - xmin0)/GOLDEN_RATIO;
double xmax0_new = xmax0 - (xmax0 - xmin0)/GOLDEN_RATIO;
while(fabs(xmax0_new - xmin0_new) > EXTREMUM_TOLERANCE)
{
if( fabs(fermi(0.5*(xmin+xmax) - 0.5*(xmin-xmax)*xmin0_new) - chebyshev(xmin0_new,n,coeff)) <
fabs(fermi(0.5*(xmin+xmax) - 0.5*(xmin-xmax)*xmax0_new) - chebyshev(xmax0_new,n,coeff)) )
{ xmax0 = xmin0_new; }
else
{ xmin0 = xmax0_new; }
xmin0_new = xmin0 + (xmax0 - xmin0)/GOLDEN_RATIO;
xmax0_new = xmax0 - (xmax0 - xmin0)/GOLDEN_RATIO;
}
error = fabs(fermi(0.5*(xmin+xmax) - 0.5*(xmin-xmax)*0.5*(xmin0+xmax0)) - chebyshev(0.5*(xmin0+xmax0),n,coeff));
free(coeff_big);
free(val);
free(pt);
return error;
}
// Find an appropriate rational approximation in the table file: the first
// entry with exactly 2*n poles whose validity range covers y_target.
// RETURN: maximum approximation error
double rational_approximation(int n, // number of pole pairs
double min_energy, // minimum orbital energy of the system
double potential, // chemical potential of the system
double temperature, // temperature of the system
double complex *w, // approximation residues [n]
double complex *z) // poles (ordered by decreasing magnitude of imaginary part) [n]
{
// open the table of rational approximations
// NOTE: this version of the table has no header & is ordered by increasing # of poles & increasing error
FILE *quadrature_table = fopen(RATIONAL_TABLE_PATH,"r");
if(quadrature_table == NULL)
{ printf("ERROR: rational approximation table not found at %s\n",RATIONAL_TABLE_PATH); MPI_Abort(MPI_COMM_WORLD,0); }
int num_pole;
double approximation_error, y, real_part, imag_part;
double y_target = (potential - min_energy)/temperature;
double complex *w0 = (double complex*)malloc(sizeof(double complex)*2*n);
double complex *z0 = (double complex*)malloc(sizeof(double complex)*2*n);
// loop over entries of the table
do
{
// read the next entry header; checking the fscanf result also catches a
// truncated/corrupt table instead of spinning on stale values
if(fscanf(quadrature_table,"%d %lf %lf",&num_pole,&approximation_error,&y) != 3)
{ printf("ERROR: suitable rational approximation was not found in table\n"); MPI_Abort(MPI_COMM_WORLD,0); }
for(int i=0 ; i<num_pole ; i++)
{
// Only entries with exactly 2*n poles fit in w0/z0; other entries are
// consumed from the file but not stored.  Previously every entry was
// written into w0/z0, overflowing the buffers when num_pole > 2*n.
fscanf(quadrature_table,"%lf %lf",&real_part,&imag_part);
if(num_pole == 2*n) { w0[i] = real_part + I*imag_part; }
fscanf(quadrature_table,"%lf %lf",&real_part,&imag_part);
if(num_pole == 2*n) { z0[i] = real_part + I*imag_part; }
}
}while(num_pole != 2*n || y < y_target);
fclose(quadrature_table);
// order by magnitude (inefficient bubble sort)
for(int i=0 ; i<2*n ; i++)
{
for(int j=i+1 ; j<2*n ; j++)
{
if(cabs(z0[j]) > cabs(z0[i]))
{
double complex c;
c = w0[i]; w0[i] = w0[j]; w0[j] = c;
c = z0[i]; z0[i] = z0[j]; z0[j] = c;
}
}
}
// shift & scale the rational approximation back to physical units
for(int i=0 ; i<2*n ; i++)
{
w0[i] *= temperature;
z0[i] *= temperature;
z0[i] += potential;
}
// group poles together into conjugate pairs
for(int i=0 ; i<2*n ; i+=2)
{
for(int j=i+2 ; j<2*n ; j++)
{
if(cabs(z0[i]-conj(z0[j])) < cabs(z0[i]-conj(z0[i+1])))
{
double complex c;
c = w0[i+1]; w0[i+1] = w0[j]; w0[j] = c;
c = z0[i+1]; z0[i+1] = z0[j]; z0[j] = c;
}
// order positive imaginary part first
if(cimag(z0[i]) < cimag(z0[i+1]))
{
double complex c;
c = w0[i+1]; w0[i+1] = w0[i]; w0[i] = c;
c = z0[i+1]; z0[i+1] = z0[i]; z0[i] = c;
}
}
}
// save only one residue & pole from each pair
for(int i=0 ; i<n ; i++)
{
w[i] = w0[2*i];
z[i] = z0[2*i];
}
free(z0);
free(w0);
return approximation_error;
}
//============================//
// 2. NRL TIGHT-BINDING MODEL //
//============================//
// NRL tight-binding model parameters
struct nrl_tb
{
double Rcut, R0, Rs, lambda; // numerical cutoff radius, screening radius, screening length, & environment decay
double hs[4], hp[4], hd[4]; // onsite parameters
double hsss[4], hsps[4], hpps[4], hppp[4], hsds[4], hpds[4], hpdp[4], hdds[4], hddp[4], hddd[4]; // hopping parameters
double osss[4], osps[4], opps[4], oppp[4], osds[4], opds[4], opdp[4], odds[4], oddp[4], oddd[4]; // overlap parameters
};
// hard-coded parameters for copper
// RETURN: structure filled with copper parameters
// NOTE(review): the numeric table below is transcribed parameter data --
// do not edit individual values by hand.
struct nrl_tb define_copper()
{
// C99 syntax for "designated initializers"
struct nrl_tb cu = {
.Rcut = 12.5,
.R0 = 11.25, // RCUT - 5*SCREENL, for some reason not the bare parameter
.Rs = 0.25,
.lambda = .145617816949E+01,
// a, b, c, d
.hs = { .291179442078E-01, .608612040825E+02, -.580815805783E+04, .225817615341E+06 },
.hp = { .344716987246E+00, .888191059298E+02, -.627796769797E+04, .175924743450E+06 },
.hd = { -.290980998425E-02, -.280134504507E+01, .439691173572E+03, -.133435774471E+05 },
// e, f, fbar, g
.hsss = { -.597191735504E+01, .157276992857E+01, -.447299469804E+00, .968392496859E+00 },
.hsps = { .142228825776E+01, .111328503057E+00, .209048736613E-01, .816193556611E+00 },
.hpps = { -.699924962951E+00, .685983943326E+00, -.283976143863E-01, .766161691504E+00 },
.hppp = { -.194951465694E-01, -.157553504153E+01, .301142535846E+00, .943349455985E+00 },
.hsds = { -.487019125256E+00, -.122729421901E+00, -.282606250674E-01, .925507793241E+00 },
.hpds = { -.290425374224E+00, -.715797951782E-01, .137648233927E-02, .743208041114E+00 },
.hpdp = { -.186619297102E+01, .827909641955E+00, .129381300114E+00, .105155367074E+01 },
.hdds = { -.264216452809E+01, .612527278745E+00, -.411141233432E-01, .811325004989E+00 },
.hddp = { .697425666621E+01, -.173638099984E+01, .168047875555E+00, .101445807107E+01 },
.hddd = { -.122136143098E+00, -.106786813791E+00, -.573634877781E-01, .114358651642E+01 },
.osss = { -.187763110058E+01, .999745133711E+00, .294871103015E+00, .963163153997E+00 },
.osps = { .349830122695E+02, -.130114254052E+02, .607050297159E+00, .986803443924E+00 },
.opps = { .469831980051E+02, -.150210237460E+02, .423592218489E+00, .103136127318E+01 },
.oppp = { -.452858187471E+02, .212940485258E+02, -.222119065584E+01, .973686678526E+00 },
.osds = { .185975554048E+01, -.101721693929E+01, .361939123784E-01, .113738864025E+01 },
.opds = { .151404237752E+01, -.648815291269E+00, -.301781892056E+00, .107714476838E+01 },
.opdp = { -.824947586413E+01, .737040055222E+00, .202806401480E-01, .102268934886E+01 },
.odds = { .552906497058E+01, .859731091202E-01, -.303881382425E+00, .101972266315E+01 },
.oddp = { -.856025085531E+01, .413682082679E+00, .561269698491E+00, .119817640580E+01 },
.oddd = { .836929253859E-01, -.307737391082E+00, .754080691966E-01, .983776299155E+00 } };
return cu;
}
// smooth cutoff (screening) function, F_c(R): ~1 well inside the cutoff,
// rolls off through a Fermi-like step of width Rs centered at R0, hard zero past Rcut
// RETURN: function value
double screen(double R, // distance between a pair of atoms
              struct nrl_tb *param) // tight-binding parameters [1]
{
  if(R > param->Rcut) { return 0.0; } // beyond the hard cutoff there is no coupling
  double step = exp((R - param->R0)/param->Rs);
  return 1.0/(1.0 + step);
}
// RETURN: derivative of screen() w.r.t. R
// d/dR [1/(1+e^x)] = -e^x/(1+e^x)^2 = -1/(2*(1+cosh(x))) with x = (R-R0)/Rs
double dscreen_dR(double R, // distance between a pair of atoms
                  struct nrl_tb *param) // tight-binding parameters [1]
{
  if(R > param->Rcut) { return 0.0; } // screen() is identically zero past the cutoff
  double x = (R - param->R0)/param->Rs;
  return -0.5/((1.0 + cosh(x))*param->Rs);
}
// local environment parameter for the on-site Hamiltonian; the caller sums this
// pairwise contribution over all neighbors of an atom
// RETURN: function value
double rho(double R, // distance between a pair of atoms
           struct nrl_tb *param) // tight-binding parameters [1]
{
  double decay = exp(-pow(param->lambda,2)*R); // exponential decay with rate lambda^2
  return decay*screen(R,param);
}
// RETURN: derivative of rho() w.r.t. R (product rule over the decay & screen factors)
double drho_dR(double R, // distance between a pair of atoms
               struct nrl_tb *param) // tight-binding parameters [1]
{
  double lam2 = pow(param->lambda,2); // decay rate, lambda^2
  return exp(-lam2*R)*(dscreen_dR(R,param) - lam2*screen(R,param));
}
// on-site tight-binding matrix element
#define RHO0 1e-16 // regularization factor
// RETURN: function value
double onsite(double rho, // total rho value summed over neighbors
double *abcd) // a, b, c, d from the NRL parameters [4]
{
return abcd[0] + abcd[1]*pow(RHO0+rho,2.0/3.0) + abcd[2]*pow(RHO0+rho,4.0/3.0) + abcd[3]*pow(RHO0+rho,2);
}
// RETURN: derivative of onsite() w.r.t. rho (term-by-term power rule)
double donsite_drho(double rho, // total rho value summed over neighbors
                    double *abcd) // a, b, c, d from the NRL parameters [4]
{
  double r = RHO0 + rho; // regularized argument, matching onsite()
  return (2.0/3.0)*abcd[1]*pow(r,-1.0/3.0) + (4.0/3.0)*abcd[2]*pow(r,1.0/3.0) + 2.0*abcd[3]*r;
}
// bonding functions used to define hopping matrix elements:
// quadratic polynomial * exponential decay * smooth cutoff
// RETURN: function value
double bond(double R, // distance between a pair of atoms
            double *effg, // e, f, fbar, g from the NRL parameters [4]
            struct nrl_tb *param) // tight-binding parameters [1]
{
  double poly = effg[0] + effg[1]*R + effg[2]*R*R; // e + f*R + fbar*R^2
  double decay = exp(-effg[3]*effg[3]*R); // decay rate g^2
  return poly*decay*screen(R,param);
}
// RETURN: derivative of bond() w.r.t. R
// NOTE: product rule over the three R-dependent factors (polynomial, exponential
//       decay, screen); the shared factors exp(-g^2*R) and screen(R) were previously
//       recomputed in every term & are hoisted here (values are unchanged)
double dbond_dR(double R, // distance between a pair of atoms
                double *effg, // e, f, fbar, g from the NRL parameters [4]
                struct nrl_tb *param) // tight-binding parameters [1]
{
  double g2 = effg[3]*effg[3]; // decay rate, g^2
  double poly = effg[0] + effg[1]*R + effg[2]*R*R; // e + f*R + fbar*R^2
  double decay = exp(-g2*R);
  double S = screen(R,param);
  return (effg[1] + 2.0*effg[2]*R)*decay*S // polynomial derivative term
       - g2*poly*decay*S // exponential-decay derivative term
       + poly*decay*dscreen_dR(R,param); // cutoff derivative term
}
// symmetrically fill in a matrix block of an s/p/d Slater-Koster tight-binding model
// NOTE: using notation consistent with [Phys. Rev. 94, 1498 (1954)]
// & orbitals ordered as: s, p_x, p_y, p_z, d_xy, d_yz, d_zx, d_{x^2-y^2}, d_{3z^2-r^2}
// NOTE: mat is column-major 9x9, indexed mat[row + col*9]; entries that are odd under
//       exchange of bra & ket (odd total parity, e.g. s-p & p-d) use the
//       -(mat[...] = ...) idiom to set one triangle & its sign-flipped transpose
void fill_mat(double l, // x directional cosine
              double m, // y directional cosine
              double n, // z directional cosine
              double sss, // s s sigma term
              double sps, // s p sigma term
              double pps, // p p sigma term
              double ppp, // p p pi term
              double sds, // s d sigma term
              double pds, // p d sigma term
              double pdp, // p d pi term
              double dds, // d d sigma term
              double ddp, // d d pi term
              double ddd, // d d delta term
              double *mat) // 9-by-9 matrix block [81]
{
  // ss terms
  mat[0+0*9] = sss;
  // sp terms (antisymmetric: p orbital is odd under inversion)
  mat[1+0*9] = -(mat[0+1*9] = l*sps);
  mat[2+0*9] = -(mat[0+2*9] = m*sps);
  mat[3+0*9] = -(mat[0+3*9] = n*sps);
  // pp terms (symmetric)
  mat[1+1*9] = l*l*pps + (1.0 - l*l)*ppp;
  mat[2+2*9] = m*m*pps + (1.0 - m*m)*ppp;
  mat[3+3*9] = n*n*pps + (1.0 - n*n)*ppp;
  mat[2+1*9] = mat[1+2*9] = l*m*pps - l*m*ppp;
  mat[3+1*9] = mat[1+3*9] = l*n*pps - l*n*ppp;
  mat[3+2*9] = mat[2+3*9] = m*n*pps - m*n*ppp;
  // sd terms (symmetric)
  mat[4+0*9] = mat[0+4*9] = sqrt(3.0)*l*m*sds;
  mat[5+0*9] = mat[0+5*9] = sqrt(3.0)*m*n*sds;
  mat[6+0*9] = mat[0+6*9] = sqrt(3.0)*n*l*sds;
  mat[7+0*9] = mat[0+7*9] = 0.5*sqrt(3.0)*(l*l - m*m)*sds;
  mat[8+0*9] = mat[0+8*9] = (n*n - 0.5*(l*l + m*m))*sds;
  // pd terms (antisymmetric)
  mat[4+1*9] = -(mat[1+4*9] = sqrt(3.0)*l*l*m*pds + m*(1.0 - 2.0*l*l)*pdp);
  mat[5+2*9] = -(mat[2+5*9] = sqrt(3.0)*m*m*n*pds + n*(1.0 - 2.0*m*m)*pdp);
  mat[6+3*9] = -(mat[3+6*9] = sqrt(3.0)*n*n*l*pds + l*(1.0 - 2.0*n*n)*pdp);
  mat[4+3*9] = mat[6+2*9] = mat[5+1*9] = -(mat[1+5*9] = mat[2+6*9] = mat[3+4*9] = sqrt(3.0)*l*m*n*pds - 2.0*l*m*n*pdp);
  mat[6+1*9] = -(mat[1+6*9] = sqrt(3.0)*l*l*n*pds + n*(1.0 - 2.0*l*l)*pdp);
  mat[4+2*9] = -(mat[2+4*9] = sqrt(3.0)*m*m*l*pds + l*(1.0 - 2.0*m*m)*pdp);
  mat[5+3*9] = -(mat[3+5*9] = sqrt(3.0)*n*n*m*pds + m*(1.0 - 2.0*n*n)*pdp);
  mat[7+1*9] = -(mat[1+7*9] = 0.5*sqrt(3.0)*l*(l*l - m*m)*pds + l*(1.0 - l*l + m*m)*pdp);
  mat[7+2*9] = -(mat[2+7*9] = 0.5*sqrt(3.0)*m*(l*l - m*m)*pds - m*(1.0 + l*l - m*m)*pdp);
  mat[7+3*9] = -(mat[3+7*9] = 0.5*sqrt(3.0)*n*(l*l - m*m)*pds - n*(l*l - m*m)*pdp);
  mat[8+1*9] = -(mat[1+8*9] = l*(n*n - 0.5*(l*l + m*m))*pds - sqrt(3.0)*l*n*n*pdp);
  mat[8+2*9] = -(mat[2+8*9] = m*(n*n - 0.5*(l*l + m*m))*pds - sqrt(3.0)*m*n*n*pdp);
  mat[8+3*9] = -(mat[3+8*9] = n*(n*n - 0.5*(l*l + m*m))*pds + sqrt(3.0)*n*(l*l + m*m)*pdp);
  // dd terms (symmetric)
  mat[4+4*9] = 3.0*l*l*m*m*dds + (l*l + m*m - 4.0*l*l*m*m)*ddp + (n*n + l*l*m*m)*ddd;
  mat[5+5*9] = 3.0*m*m*n*n*dds + (m*m + n*n - 4.0*m*m*n*n)*ddp + (l*l + m*m*n*n)*ddd;
  mat[6+6*9] = 3.0*n*n*l*l*dds + (n*n + l*l - 4.0*n*n*l*l)*ddp + (m*m + n*n*l*l)*ddd;
  mat[5+4*9] = mat[4+5*9] = 3.0*l*m*m*n*dds + l*n*(1.0 - 4.0*m*m)*ddp + l*n*(m*m - 1.0)*ddd;
  mat[6+5*9] = mat[5+6*9] = 3.0*m*n*n*l*dds + m*l*(1.0 - 4.0*n*n)*ddp + m*l*(n*n - 1.0)*ddd;
  mat[6+4*9] = mat[4+6*9] = 3.0*n*l*l*m*dds + n*m*(1.0 - 4.0*l*l)*ddp + n*m*(l*l - 1.0)*ddd;
  mat[7+4*9] = mat[4+7*9] = 1.5*l*m*(l*l - m*m)*dds + 2.0*l*m*(m*m - l*l)*ddp + 0.5*l*m*(l*l - m*m)*ddd;
  mat[7+5*9] = mat[5+7*9] = 1.5*m*n*(l*l - m*m)*dds - m*n*(1.0 + 2.0*(l*l - m*m))*ddp + m*n*(1.0 + 0.5*(l*l - m*m))*ddd;
  mat[7+6*9] = mat[6+7*9] = 1.5*n*l*(l*l - m*m)*dds + n*l*(1.0 - 2.0*(l*l - m*m))*ddp - n*l*(1.0 - 0.5*(l*l - m*m))*ddd;
  mat[8+4*9] = mat[4+8*9] = sqrt(3.0)*(l*m*(n*n - 0.5*(l*l + m*m))*dds - 2.0*l*m*n*n*ddp + 0.5*l*m*(1.0 + n*n)*ddd);
  mat[8+5*9] = mat[5+8*9] = sqrt(3.0)*(m*n*(n*n - 0.5*(l*l + m*m))*dds + m*n*(l*l + m*m - n*n)*ddp - 0.5*m*n*(l*l + m*m)*ddd);
  mat[8+6*9] = mat[6+8*9] = sqrt(3.0)*(n*l*(n*n - 0.5*(l*l + m*m))*dds + n*l*(l*l + m*m - n*n)*ddp - 0.5*n*l*(l*l + m*m)*ddd);
  mat[7+7*9] = 0.75*pow(l*l - m*m,2)*dds + (l*l + m*m - pow(l*l - m*m,2))*ddp + (n*n + 0.25*pow(l*l - m*m,2))*ddd;
  mat[8+7*9] = mat[7+8*9] = sqrt(3.0)*(0.5*(l*l - m*m)*(n*n - 0.5*(l*l + m*m))*dds + n*n*(m*m - l*l)*ddp
                                       + 0.25*(1.0 + n*n)*(l*l - m*m)*ddd);
  mat[8+8*9] = pow(n*n - 0.5*(l*l + m*m),2)*dds + 3.0*n*n*(l*l + m*m)*ddp + 0.75*pow(l*l + m*m,2)*ddd;
}
// derivative of the Slater-Koster matrices w.r.t. l/m/n
// NOTE: dmat is indexed as dmat[row + col*9 + axis*81] with axis 0/1/2 holding the
//       partial derivative w.r.t. l/m/n respectively, treating l, m & n as independent
//       variables; the chain rule back to atomic coordinates (where l^2+m^2+n^2 = 1 is
//       enforced) is applied by the caller (see tb_offdiagonal_force)
// NOTE: entry-by-entry these are the analytic derivatives of the fill_mat() formulas,
//       with the same symmetric / antisymmetric (-(dmat[...] = ...)) filling idiom
void fill_dmat(double l, // x directional cosine
               double m, // y directional cosine
               double n, // z directional cosine
               double sss, // s s sigma term
               double sps, // s p sigma term
               double pps, // p p sigma term
               double ppp, // p p pi term
               double sds, // s d sigma term
               double pds, // p d sigma term
               double pdp, // p d pi term
               double dds, // d d sigma term
               double ddp, // d d pi term
               double ddd, // d d delta term
               double *dmat) // array of 3 9-by-9 matrix blocks [243]
{
  // ss terms (constant in l/m/n, so all derivatives vanish)
  dmat[0+0*9+0*81] = 0.0;
  dmat[0+0*9+1*81] = 0.0;
  dmat[0+0*9+2*81] = 0.0;
  // sp terms
  dmat[1+0*9+0*81] = -(dmat[0+1*9+0*81] = sps);
  dmat[1+0*9+1*81] = -(dmat[0+1*9+1*81] = 0.0);
  dmat[1+0*9+2*81] = -(dmat[0+1*9+2*81] = 0.0);
  dmat[2+0*9+0*81] = -(dmat[0+2*9+0*81] = 0.0);
  dmat[2+0*9+1*81] = -(dmat[0+2*9+1*81] = sps);
  dmat[2+0*9+2*81] = -(dmat[0+2*9+2*81] = 0.0);
  dmat[3+0*9+0*81] = -(dmat[0+3*9+0*81] = 0.0);
  dmat[3+0*9+1*81] = -(dmat[0+3*9+1*81] = 0.0);
  dmat[3+0*9+2*81] = -(dmat[0+3*9+2*81] = sps);
  // pp terms
  dmat[1+1*9+0*81] = 2.0*l*pps - 2.0*l*ppp;
  dmat[1+1*9+1*81] = 0.0;
  dmat[1+1*9+2*81] = 0.0;
  dmat[2+2*9+0*81] = 0.0;
  dmat[2+2*9+1*81] = 2.0*m*pps - 2.0*m*ppp;
  dmat[2+2*9+2*81] = 0.0;
  dmat[3+3*9+0*81] = 0.0;
  dmat[3+3*9+1*81] = 0.0;
  dmat[3+3*9+2*81] = 2.0*n*pps - 2.0*n*ppp;
  dmat[2+1*9+0*81] = dmat[1+2*9+0*81] = m*pps - m*ppp;
  dmat[2+1*9+1*81] = dmat[1+2*9+1*81] = l*pps - l*ppp;
  dmat[2+1*9+2*81] = dmat[1+2*9+2*81] = 0.0;
  dmat[3+1*9+0*81] = dmat[1+3*9+0*81] = n*pps - n*ppp;
  dmat[3+1*9+1*81] = dmat[1+3*9+1*81] = 0.0;
  dmat[3+1*9+2*81] = dmat[1+3*9+2*81] = l*pps - l*ppp;
  dmat[3+2*9+0*81] = dmat[2+3*9+0*81] = 0.0;
  dmat[3+2*9+1*81] = dmat[2+3*9+1*81] = n*pps - n*ppp;
  dmat[3+2*9+2*81] = dmat[2+3*9+2*81] = m*pps - m*ppp;
  // sd terms
  dmat[4+0*9+0*81] = dmat[0+4*9+0*81] = sqrt(3.0)*m*sds;
  dmat[4+0*9+1*81] = dmat[0+4*9+1*81] = sqrt(3.0)*l*sds;
  dmat[4+0*9+2*81] = dmat[0+4*9+2*81] = 0.0;
  dmat[5+0*9+0*81] = dmat[0+5*9+0*81] = 0.0;
  dmat[5+0*9+1*81] = dmat[0+5*9+1*81] = sqrt(3.0)*n*sds;
  dmat[5+0*9+2*81] = dmat[0+5*9+2*81] = sqrt(3.0)*m*sds;
  dmat[6+0*9+0*81] = dmat[0+6*9+0*81] = sqrt(3.0)*n*sds;
  dmat[6+0*9+1*81] = dmat[0+6*9+1*81] = 0.0;
  dmat[6+0*9+2*81] = dmat[0+6*9+2*81] = sqrt(3.0)*l*sds;
  dmat[7+0*9+0*81] = dmat[0+7*9+0*81] = sqrt(3.0)*l*sds;
  dmat[7+0*9+1*81] = dmat[0+7*9+1*81] = -sqrt(3.0)*m*sds;
  dmat[7+0*9+2*81] = dmat[0+7*9+2*81] = 0.0;
  dmat[8+0*9+0*81] = dmat[0+8*9+0*81] = -l*sds;
  dmat[8+0*9+1*81] = dmat[0+8*9+1*81] = -m*sds;
  dmat[8+0*9+2*81] = dmat[0+8*9+2*81] = 2.0*n*sds;
  // pd terms
  dmat[4+1*9+0*81] = -(dmat[1+4*9+0*81] = 2.0*sqrt(3.0)*l*m*pds - 4.0*m*l*pdp);
  dmat[4+1*9+1*81] = -(dmat[1+4*9+1*81] = sqrt(3.0)*l*l*pds + (1.0 - 2.0*l*l)*pdp);
  dmat[4+1*9+2*81] = -(dmat[1+4*9+2*81] = 0.0);
  dmat[5+2*9+0*81] = -(dmat[2+5*9+0*81] = 0.0);
  dmat[5+2*9+1*81] = -(dmat[2+5*9+1*81] = 2.0*sqrt(3.0)*m*n*pds - 4.0*n*m*pdp);
  dmat[5+2*9+2*81] = -(dmat[2+5*9+2*81] = sqrt(3.0)*m*m*pds + (1.0 - 2.0*m*m)*pdp);
  dmat[6+3*9+0*81] = -(dmat[3+6*9+0*81] = sqrt(3.0)*n*n*pds + (1.0 - 2.0*n*n)*pdp);
  dmat[6+3*9+1*81] = -(dmat[3+6*9+1*81] = 0.0);
  dmat[6+3*9+2*81] = -(dmat[3+6*9+2*81] = 2.0*sqrt(3.0)*n*l*pds - 4.0*l*n*pdp);
  dmat[4+3*9+0*81] = dmat[6+2*9+0*81] = dmat[5+1*9+0*81]
    = -(dmat[1+5*9+0*81] = dmat[2+6*9+0*81] = dmat[3+4*9+0*81] = sqrt(3.0)*m*n*pds - 2.0*m*n*pdp);
  dmat[4+3*9+1*81] = dmat[6+2*9+1*81] = dmat[5+1*9+1*81]
    = -(dmat[1+5*9+1*81] = dmat[2+6*9+1*81] = dmat[3+4*9+1*81] = sqrt(3.0)*l*n*pds - 2.0*l*n*pdp);
  dmat[4+3*9+2*81] = dmat[6+2*9+2*81] = dmat[5+1*9+2*81]
    = -(dmat[1+5*9+2*81] = dmat[2+6*9+2*81] = dmat[3+4*9+2*81] = sqrt(3.0)*l*m*pds - 2.0*l*m*pdp);
  dmat[6+1*9+0*81] = -(dmat[1+6*9+0*81] = 2.0*sqrt(3.0)*l*n*pds - 4.0*n*l*pdp);
  dmat[6+1*9+1*81] = -(dmat[1+6*9+1*81] = 0.0);
  dmat[6+1*9+2*81] = -(dmat[1+6*9+2*81] = sqrt(3.0)*l*l*pds + (1.0 - 2.0*l*l)*pdp);
  dmat[4+2*9+0*81] = -(dmat[2+4*9+0*81] = sqrt(3.0)*m*m*pds + (1.0 - 2.0*m*m)*pdp);
  dmat[4+2*9+1*81] = -(dmat[2+4*9+1*81] = 2.0*sqrt(3.0)*m*l*pds - 4.0*l*m*pdp);
  dmat[4+2*9+2*81] = -(dmat[2+4*9+2*81] = 0.0);
  dmat[5+3*9+0*81] = -(dmat[3+5*9+0*81] = 0.0);
  dmat[5+3*9+1*81] = -(dmat[3+5*9+1*81] = sqrt(3.0)*n*n*pds + (1.0 - 2.0*n*n)*pdp);
  dmat[5+3*9+2*81] = -(dmat[3+5*9+2*81] = 2.0*sqrt(3.0)*n*m*pds - 4.0*m*n*pdp);
  dmat[7+1*9+0*81] = -(dmat[1+7*9+0*81] = 0.5*sqrt(3.0)*(3.0*l*l - m*m)*pds + (1.0 - 3.0*l*l + m*m)*pdp);
  dmat[7+1*9+1*81] = -(dmat[1+7*9+1*81] = -sqrt(3.0)*l*m*pds + 2.0*l*m*pdp);
  dmat[7+1*9+2*81] = -(dmat[1+7*9+2*81] = 0.0);
  dmat[7+2*9+0*81] = -(dmat[2+7*9+0*81] = sqrt(3.0)*m*l*pds - 2.0*m*l*pdp);
  dmat[7+2*9+1*81] = -(dmat[2+7*9+1*81] = 0.5*sqrt(3.0)*(l*l - 3.0*m*m)*pds - (1.0 + l*l - 3.0*m*m)*pdp);
  dmat[7+2*9+2*81] = -(dmat[2+7*9+2*81] = 0.0);
  dmat[7+3*9+0*81] = -(dmat[3+7*9+0*81] = sqrt(3.0)*n*l*pds - 2.0*n*l*pdp);
  dmat[7+3*9+1*81] = -(dmat[3+7*9+1*81] = -sqrt(3.0)*n*m*pds + 2.0*n*m*pdp);
  dmat[7+3*9+2*81] = -(dmat[3+7*9+2*81] = 0.5*sqrt(3.0)*(l*l - m*m)*pds - (l*l - m*m)*pdp);
  dmat[8+1*9+0*81] = -(dmat[1+8*9+0*81] = (n*n - 0.5*(3.0*l*l + m*m))*pds - sqrt(3.0)*n*n*pdp);
  dmat[8+1*9+1*81] = -(dmat[1+8*9+1*81] = -l*m*pds);
  dmat[8+1*9+2*81] = -(dmat[1+8*9+2*81] = 2.0*l*n*pds - 2.0*sqrt(3.0)*l*n*pdp);
  dmat[8+2*9+0*81] = -(dmat[2+8*9+0*81] = -m*l*pds);
  dmat[8+2*9+1*81] = -(dmat[2+8*9+1*81] = (n*n - 0.5*(l*l + 3.0*m*m))*pds - sqrt(3.0)*n*n*pdp);
  dmat[8+2*9+2*81] = -(dmat[2+8*9+2*81] = 2.0*m*n*pds - 2.0*sqrt(3.0)*m*n*pdp);
  dmat[8+3*9+0*81] = -(dmat[3+8*9+0*81] = -n*l*pds + 2.0*sqrt(3.0)*n*l*pdp);
  dmat[8+3*9+1*81] = -(dmat[3+8*9+1*81] = -n*m*pds + 2.0*sqrt(3.0)*n*m*pdp);
  dmat[8+3*9+2*81] = -(dmat[3+8*9+2*81] = (3.0*n*n - 0.5*(l*l + m*m))*pds + sqrt(3.0)*(l*l + m*m)*pdp);
  // dd terms
  dmat[4+4*9+0*81] = 6.0*l*m*m*dds + (2.0*l - 8.0*l*m*m)*ddp + 2.0*l*m*m*ddd;
  dmat[4+4*9+1*81] = 6.0*l*l*m*dds + (2.0*m - 8.0*l*l*m)*ddp + 2.0*l*l*m*ddd;
  dmat[4+4*9+2*81] = 2.0*n*ddd;
  dmat[5+5*9+0*81] = 2.0*l*ddd;
  dmat[5+5*9+1*81] = 6.0*m*n*n*dds + (2.0*m - 8.0*m*n*n)*ddp + 2.0*m*n*n*ddd;
  dmat[5+5*9+2*81] = 6.0*m*m*n*dds + (2.0*n - 8.0*m*m*n)*ddp + 2.0*m*m*n*ddd;
  dmat[6+6*9+0*81] = 6.0*n*n*l*dds + (2.0*l - 8.0*n*n*l)*ddp + 2.0*n*n*l*ddd;
  dmat[6+6*9+1*81] = 2.0*m*ddd;
  dmat[6+6*9+2*81] = 6.0*n*l*l*dds + (2.0*n - 8.0*n*l*l)*ddp + 2.0*n*l*l*ddd;
  dmat[5+4*9+0*81] = dmat[4+5*9+0*81] = 3.0*m*m*n*dds + n*(1.0 - 4.0*m*m)*ddp + n*(m*m - 1.0)*ddd;
  dmat[5+4*9+1*81] = dmat[4+5*9+1*81] = 6.0*l*m*n*dds - 8.0*l*n*m*ddp + 2.0*l*n*m*ddd;
  dmat[5+4*9+2*81] = dmat[4+5*9+2*81] = 3.0*l*m*m*dds + l*(1.0 - 4.0*m*m)*ddp + l*(m*m - 1.0)*ddd;
  dmat[6+5*9+0*81] = dmat[5+6*9+0*81] = 3.0*m*n*n*dds + m*(1.0 - 4.0*n*n)*ddp + m*(n*n - 1.0)*ddd;
  dmat[6+5*9+1*81] = dmat[5+6*9+1*81] = 3.0*n*n*l*dds + l*(1.0 - 4.0*n*n)*ddp + l*(n*n - 1.0)*ddd;
  dmat[6+5*9+2*81] = dmat[5+6*9+2*81] = 6.0*m*n*l*dds - 8.0*m*l*n*ddp + 2.0*m*l*n*ddd;
  dmat[6+4*9+0*81] = dmat[4+6*9+0*81] = 6.0*n*l*m*dds - 8.0*n*m*l*ddp + 2.0*n*m*l*ddd;
  dmat[6+4*9+1*81] = dmat[4+6*9+1*81] = 3.0*n*l*l*dds + n*(1.0 - 4.0*l*l)*ddp + n*(l*l - 1.0)*ddd;
  dmat[6+4*9+2*81] = dmat[4+6*9+2*81] = 3.0*l*l*m*dds + m*(1.0 - 4.0*l*l)*ddp + m*(l*l - 1.0)*ddd;
  dmat[7+4*9+0*81] = dmat[4+7*9+0*81] = 1.5*m*(3.0*l*l - m*m)*dds + 2.0*m*(m*m - 3.0*l*l)*ddp + 0.5*m*(3.0*l*l - m*m)*ddd;
  dmat[7+4*9+1*81] = dmat[4+7*9+1*81] = 1.5*l*(l*l - 3.0*m*m)*dds + 2.0*l*(3.0*m*m - l*l)*ddp + 0.5*l*(l*l - 3.0*m*m)*ddd;
  dmat[7+4*9+2*81] = dmat[4+7*9+2*81] = 0.0;
  dmat[7+5*9+0*81] = dmat[5+7*9+0*81] = 3.0*m*n*l*dds - 4.0*m*n*l*ddp + m*n*l*ddd;
  dmat[7+5*9+1*81] = dmat[5+7*9+1*81] = 1.5*n*(l*l - 3.0*m*m)*dds - n*(1.0 + 2.0*(l*l - 3.0*m*m))*ddp
                                      + n*(1.0 + 0.5*(l*l - 3.0*m*m))*ddd;
  dmat[7+5*9+2*81] = dmat[5+7*9+2*81] = 1.5*m*(l*l - m*m)*dds - m*(1.0 + 2.0*(l*l - m*m))*ddp + m*(1.0 + 0.5*(l*l - m*m))*ddd;
  dmat[7+6*9+0*81] = dmat[6+7*9+0*81] = 1.5*n*(3.0*l*l - m*m)*dds + n*(1.0 - 2.0*(3.0*l*l - m*m))*ddp
                                      - n*(1.0 - 0.5*(3.0*l*l - m*m))*ddd;
  dmat[7+6*9+1*81] = dmat[6+7*9+1*81] = -3.0*n*l*m*dds + 4.0*n*l*m*ddp - n*l*m*ddd;
  dmat[7+6*9+2*81] = dmat[6+7*9+2*81] = 1.5*l*(l*l - m*m)*dds + l*(1.0 - 2.0*(l*l - m*m))*ddp - l*(1.0 - 0.5*(l*l - m*m))*ddd;
  dmat[8+4*9+0*81] = dmat[4+8*9+0*81] = sqrt(3.0)*(m*(n*n - 0.5*(3.0*l*l + m*m))*dds - 2.0*m*n*n*ddp + 0.5*m*(1.0 + n*n)*ddd);
  dmat[8+4*9+1*81] = dmat[4+8*9+1*81] = sqrt(3.0)*(l*(n*n - 0.5*(l*l + 3.0*m*m))*dds - 2.0*l*n*n*ddp + 0.5*l*(1.0 + n*n)*ddd);
  dmat[8+4*9+2*81] = dmat[4+8*9+2*81] = sqrt(3.0)*(2.0*l*m*n*dds - 4.0*l*m*n*ddp + l*m*n*ddd);
  dmat[8+5*9+0*81] = dmat[5+8*9+0*81] = sqrt(3.0)*(-m*n*l*dds + 2.0*m*n*l*ddp - m*n*l*ddd);
  dmat[8+5*9+1*81] = dmat[5+8*9+1*81] = sqrt(3.0)*(n*(n*n - 0.5*(l*l + 3.0*m*m))*dds + n*(l*l + 3.0*m*m - n*n)*ddp
                                                   - 0.5*n*(l*l + 3.0*m*m)*ddd);
  dmat[8+5*9+2*81] = dmat[5+8*9+2*81] = sqrt(3.0)*(m*(3.0*n*n - 0.5*(l*l + m*m))*dds + m*(l*l + m*m - 3.0*n*n)*ddp
                                                   - 0.5*m*(l*l + m*m)*ddd);
  dmat[8+6*9+0*81] = dmat[6+8*9+0*81] = sqrt(3.0)*(n*(n*n - 0.5*(3.0*l*l + m*m))*dds + n*(3.0*l*l + m*m - n*n)*ddp
                                                   - 0.5*n*(3.0*l*l + m*m)*ddd);
  dmat[8+6*9+1*81] = dmat[6+8*9+1*81] = sqrt(3.0)*(-n*l*m*dds + 2.0*n*l*m*ddp - n*l*m*ddd);
  dmat[8+6*9+2*81] = dmat[6+8*9+2*81] = sqrt(3.0)*(l*(3.0*n*n - 0.5*(l*l + m*m))*dds + l*(l*l + m*m - 3.0*n*n)*ddp
                                                   - 0.5*l*(l*l + m*m)*ddd);
  dmat[7+7*9+0*81] = 3.0*l*(l*l - m*m)*dds + (2.0*l - 4.0*l*(l*l - m*m))*ddp + l*(l*l - m*m)*ddd;
  dmat[7+7*9+1*81] = -3.0*m*(l*l - m*m)*dds + (2.0*m + 4.0*m*(l*l - m*m))*ddp - m*(l*l - m*m)*ddd;
  dmat[7+7*9+2*81] = 2.0*n*ddd;
  dmat[8+7*9+0*81] = dmat[7+8*9+0*81] = sqrt(3.0)*(l*(n*n - 0.5*(l*l + m*m))*dds - 0.5*(l*l - m*m)*l*dds - 2.0*n*n*l*ddp
                                                   + 0.5*(1.0 + n*n)*l*ddd);
  dmat[8+7*9+1*81] = dmat[7+8*9+1*81] = sqrt(3.0)*(-m*(n*n - 0.5*(l*l + m*m))*dds - 0.5*(l*l - m*m)*m*dds + 2.0*n*n*m*ddp
                                                   - 0.5*(1.0 + n*n)*m*ddd);
  dmat[8+7*9+2*81] = dmat[7+8*9+2*81] = sqrt(3.0)*((l*l - m*m)*n*dds + 2.0*n*(m*m - l*l)*ddp + 0.5*n*(l*l - m*m)*ddd);
  dmat[8+8*9+0*81] = -2.0*l*(n*n - 0.5*(l*l + m*m))*dds + 6.0*n*n*l*ddp + 3.0*l*(l*l + m*m)*ddd;
  dmat[8+8*9+1*81] = -2.0*m*(n*n - 0.5*(l*l + m*m))*dds + 6.0*n*n*m*ddp + 3.0*m*(l*l + m*m)*ddd;
  dmat[8+8*9+2*81] = 4.0*n*(n*n - 0.5*(l*l + m*m))*dds + 6.0*n*(l*l + m*m)*ddp;
}
// distance between a pair of atoms
double distance(double *atom1, // coordinate of 1st atom [3]
double *atom2) // coordinate of 2nd atom [3]
{
double d2 = 0.0;
for(size_t i=0 ; i<3 ; i++) { d2 += pow(atom1[i] - atom2[i],2); }
return sqrt(d2);
}
// calculate a diagonal atomic matrix block of the tight-binding model:
// on-site energies from the local environment rho, identity overlap block
void tb_diagonal(int iatom, // atom index
                 int natom, // number of atoms
                 double *atom, // atomic coordinates [3*natom]
                 int nneighbor, // number of neighbors coupled to iatom
                 int *neighbor, // neighbor list of iatom [nneighbor]
                 struct nrl_tb *param, // tight-binding parameters [1]
                 double *hblock, // Hamiltonian matrix elements [81]
                 double *oblock) // overlap matrix elements [81]
{
  // accumulate the local environment parameter over neighbors, skipping self
  double rho0 = 0.0;
  for(int i=0 ; i<nneighbor ; i++)
  {
    if(neighbor[i] == iatom) { continue; }
    rho0 += rho(distance(&(atom[3*iatom]),&(atom[3*neighbor[i]])),param);
  }
  // clear both 9x9 blocks
  for(int i=0 ; i<81 ; i++) { hblock[i] = oblock[i] = 0.0; }
  // on-site energies on the Hamiltonian diagonal: 1 s, 3 p & 5 d orbitals
  double es = onsite(rho0,param->hs);
  double ep = onsite(rho0,param->hp);
  double ed = onsite(rho0,param->hd);
  hblock[0+0*9] = es;
  for(int i=1 ; i<4 ; i++) { hblock[i+i*9] = ep; }
  for(int i=4 ; i<9 ; i++) { hblock[i+i*9] = ed; }
  // overlap of an orbital with itself is 1 (orthonormal on-site basis)
  for(int i=0 ; i<9 ; i++) { oblock[i+i*9] = 1.0; }
}
// calculate atomic response of a diagonal atomic matrix block of the tight-binding model
// (derivative of iatom's on-site Hamiltonian block w.r.t. the position of jatom,
//  NOTE(review): the leading minus sign suggests a force / negative-gradient
//  convention — confirm against the caller)
void tb_diagonal_force(int iatom, // atom index of matrix elements
                       int jatom, // atom index of perturbed atom
                       int natom, // number of atoms
                       double *atom, // atomic coordinates [3*natom]
                       int nneighbor, // number of neighbors coupled to iatom
                       int *neighbor, // neighbor list of iatom [nneighbor]
                       struct nrl_tb *param, // tight-binding parameters [1]
                       double *hblock_force) // array of 3 Hamiltonian matrix elements [243]
{
  // calculate rho for iatom & its gradient contribution
  double rho0 = 0.0, rho0_force[3] = { 0.0, 0.0, 0.0 };
  for(int i=0 ; i<nneighbor ; i++)
  {
    if(iatom != neighbor[i])
    { rho0 += rho(distance(&(atom[3*iatom]),&(atom[3*neighbor[i]])),param); }
  }
  // when iatom == jatom, the entire sum over neighbors contributes
  if(iatom == jatom)
  {
    for(int i=0 ; i<nneighbor ; i++)
    {
      if(iatom == neighbor[i]) { continue; }
      double R = distance(&(atom[3*iatom]),&(atom[3*neighbor[i]]));
      double drho_dR0 = drho_dR(R,param);
      // dR/dx_iatom = (x_iatom - x_neighbor)/R
      for(int j=0 ; j<3 ; j++)
      { rho0_force[j] += drho_dR0*(atom[j+iatom*3]-atom[j+neighbor[i]*3])/R; }
    }
  }
  else // when iatom != jatom, only a single term in the rho sum is perturbed
  {
    double R = distance(&(atom[3*iatom]),&(atom[3*jatom]));
    double drho_dR0 = drho_dR(R,param);
    // dR/dx_jatom = (x_jatom - x_iatom)/R
    for(int j=0 ; j<3 ; j++)
    { rho0_force[j] += drho_dR0*(atom[j+jatom*3]-atom[j+iatom*3])/R; }
  }
  // chain rule: d(onsite)/dx = donsite_drho(rho0) * drho0/dx
  // the rho-derivatives are loop-invariant; hoist them out of the axis loop
  // (they were previously recomputed on every pass, 9 calls instead of 3)
  double dhs = donsite_drho(rho0,param->hs);
  double dhp = donsite_drho(rho0,param->hp);
  double dhd = donsite_drho(rho0,param->hd);
  for(int i=0 ; i<243 ; i++) { hblock_force[i] = 0.0; }
  for(int i=0 ; i<3 ; i++)
  {
    hblock_force[0+0*9+i*81] = -dhs*rho0_force[i];
    hblock_force[1+1*9+i*81] = hblock_force[2+2*9+i*81] = hblock_force[3+3*9+i*81]
      = -dhp*rho0_force[i];
    hblock_force[4+4*9+i*81] = hblock_force[5+5*9+i*81] = hblock_force[6+6*9+i*81] = hblock_force[7+7*9+i*81]
      = hblock_force[8+8*9+i*81] = -dhd*rho0_force[i];
  }
}
// calculate an offdiagonal atomic matrix block of the tight-binding model
void tb_offdiagonal(int iatom, // 1st atom index
                    int jatom, // 2nd atom index
                    int natom, // number of atoms
                    double *atom, // atomic coordinates [3*natom]
                    struct nrl_tb *param, // tight-binding parameters [1]
                    double *hblock, // Hamiltonian matrix elements [81]
                    double *oblock) // overlap matrix elements [81]
{
  // bond length & directional cosines of the vector from jatom to iatom
  double R = distance(&(atom[3*iatom]),&(atom[3*jatom]));
  double lmn[3];
  for(int i=0 ; i<3 ; i++)
  { lmn[i] = (atom[i+iatom*3]-atom[i+jatom*3])/R; }
  // Slater-Koster construction from the Hamiltonian bond integrals ...
  fill_mat(lmn[0],lmn[1],lmn[2],
           bond(R,param->hsss,param),bond(R,param->hsps,param),bond(R,param->hpps,param),bond(R,param->hppp,param),
           bond(R,param->hsds,param),bond(R,param->hpds,param),bond(R,param->hpdp,param),bond(R,param->hdds,param),
           bond(R,param->hddp,param),bond(R,param->hddd,param),hblock);
  // ... and from the overlap bond integrals
  fill_mat(lmn[0],lmn[1],lmn[2],
           bond(R,param->osss,param),bond(R,param->osps,param),bond(R,param->opps,param),bond(R,param->oppp,param),
           bond(R,param->osds,param),bond(R,param->opds,param),bond(R,param->opdp,param),bond(R,param->odds,param),
           bond(R,param->oddp,param),bond(R,param->oddd,param),oblock);
}
// calculate atomic response of an offdiagonal atomic matrix block of the tight-binding model
// (derivative of the hblock/oblock elements w.r.t. the coordinates of iatom)
void tb_offdiagonal_force(int iatom, // 1st atom index & perturbed atom
                          int jatom, // 2nd atom index
                          int natom, // number of atoms
                          double *atom, // atomic coordinates [3*natom]
                          struct nrl_tb *param, // tight-binding parameters [1]
                          double *hblock_force, // array of 3 Hamiltonian matrix elements [243]
                          double *oblock_force) // array of 3 overlap matrix elements [243]
{
  // calculate distance between atoms and directional cosines
  double R = distance(&(atom[3*iatom]),&(atom[3*jatom]));
  double l = (atom[0+iatom*3] - atom[0+jatom*3])/R;
  double m = (atom[1+iatom*3] - atom[1+jatom*3])/R;
  double n = (atom[2+iatom*3] - atom[2+jatom*3])/R;
  // derivative of the bond functions (blocks evaluated with dbond_dR in place of bond)
  double dhblock_dR[81], doblock_dR[81];
  fill_mat(l,m,n,dbond_dR(R,param->hsss,param),dbond_dR(R,param->hsps,param),dbond_dR(R,param->hpps,param),
           dbond_dR(R,param->hppp,param),dbond_dR(R,param->hsds,param),dbond_dR(R,param->hpds,param),
           dbond_dR(R,param->hpdp,param),dbond_dR(R,param->hdds,param),dbond_dR(R,param->hddp,param),
           dbond_dR(R,param->hddd,param),dhblock_dR);
  fill_mat(l,m,n,dbond_dR(R,param->osss,param),dbond_dR(R,param->osps,param),dbond_dR(R,param->opps,param),
           dbond_dR(R,param->oppp,param),dbond_dR(R,param->osds,param),dbond_dR(R,param->opds,param),
           dbond_dR(R,param->opdp,param),dbond_dR(R,param->odds,param),dbond_dR(R,param->oddp,param),
           dbond_dR(R,param->oddd,param),doblock_dR);
  // derivative of l/m/n (blocks differentiated w.r.t. the cosines, held independent)
  double dhblock_dlmn[243], doblock_dlmn[243];
  fill_dmat(l,m,n,bond(R,param->hsss,param),bond(R,param->hsps,param),bond(R,param->hpps,param),bond(R,param->hppp,param),
            bond(R,param->hsds,param),bond(R,param->hpds,param),bond(R,param->hpdp,param),bond(R,param->hdds,param),
            bond(R,param->hddp,param),bond(R,param->hddd,param),dhblock_dlmn);
  fill_dmat(l,m,n,bond(R,param->osss,param),bond(R,param->osps,param),bond(R,param->opps,param),bond(R,param->oppp,param),
            bond(R,param->osds,param),bond(R,param->opds,param),bond(R,param->opdp,param),bond(R,param->odds,param),
            bond(R,param->oddp,param),bond(R,param->oddd,param),doblock_dlmn);
  // chain rule w.r.t. coordinate a of iatom (cosine c_a in {l,m,n}):
  //   dR/dx_a = c_a  and  dc_b/dx_a = (delta_ab - c_a*c_b)/R,
  // so d(block)/dx_a = dblock_dR*c_a + dblock_dc_a/R - (sum_b dblock_dc_b*c_b)*c_a/R;
  // the stored value is the negative of this gradient
  // (NOTE(review): force / negative-gradient sign convention assumed — confirm with caller)
  for(int i=0 ; i<81 ; i++)
  {
    // projection sum_b dblock_dc_b*c_b, shared by all three axes
    double dhblock0 = dhblock_dlmn[i+0*81]*l + dhblock_dlmn[i+1*81]*m + dhblock_dlmn[i+2*81]*n;
    hblock_force[i+0*81] = -dhblock_dR[i]*l - dhblock_dlmn[i+0*81]/R + dhblock0*l/R;
    hblock_force[i+1*81] = -dhblock_dR[i]*m - dhblock_dlmn[i+1*81]/R + dhblock0*m/R;
    hblock_force[i+2*81] = -dhblock_dR[i]*n - dhblock_dlmn[i+2*81]/R + dhblock0*n/R;
    double doblock0 = doblock_dlmn[i+0*81]*l + doblock_dlmn[i+1*81]*m + doblock_dlmn[i+2*81]*n;
    oblock_force[i+0*81] = -doblock_dR[i]*l - doblock_dlmn[i+0*81]/R + doblock0*l/R;
    oblock_force[i+1*81] = -doblock_dR[i]*m - doblock_dlmn[i+1*81]/R + doblock0*m/R;
    oblock_force[i+2*81] = -doblock_dR[i]*n - doblock_dlmn[i+2*81]/R + doblock0*n/R;
  }
}
//========================//
// 3. ATOMIC PARTITIONING //
//========================//
// NOTE: This version does not support additional blocking of atoms, which would improve performance but complicate the code
// The atoms would be reordered so that atoms within a block are contiguous & a neighbor list of blocks would be computed
// in addition to the neighbor list of atoms to define the block-sparse density matrix structure
// grid of boxes structure that partitions the atoms (uniform binning for
// neighbor searches over the 3x3x3 stencil of adjacent boxes)
struct grid
{
  int nx[3]; // number of boxes in each direction
  double x0[3]; // minimum coordinate in each direction
  double dx[3]; // width of boxes in each direction
  int *to_atom; // index of locations in atom_index for the first atom in each box [nx[0]*nx[1]*nx[2]+1]
                // NOTE: this list is ordered & to_atom[nx[0]*nx[1]*nx[2]] is the number of atoms
  int *atom_index; // list of atom indices contained in each box [to_atom[nx*ny*nz]]
  // NOTE(review): to_atom & atom_index are heap-allocated by construct_grid and must
  //               be freed by the user of the grid (see neighbor_list)
};
// find the box that contains a given atom
void box_index(double *atom, // target atom [3]
               struct grid *partition, // specification of the grid for partitioning atoms [1]
               int *box) // output box index [3]
{
  for(int axis=0 ; axis<3 ; axis++)
  {
    double offset = atom[axis] - partition->x0[axis]; // distance from the grid origin
    box[axis] = (int)(offset/partition->dx[axis]); // truncate to a box index
  }
}
// Find the flattened grid index of a box (x varies fastest, z slowest)
int grid_index(int* box, // target box [3]
               struct grid* partition) // specification of the grid for partitioning atoms [1]
{
  int index = box[2];
  index = index*partition->nx[1] + box[1];
  index = index*partition->nx[0] + box[0];
  return index;
}
// comparison function for sorting (atom index, grid index) pairs with qsort from stdlib.h
// RETURN: 1 if a goes after b, -1 if a goes before b, 0 if they are equal
int list_compare(const void *a, const void *b)
{
  const int *pa = (const int*)a;
  const int *pb = (const int*)b;
  // primary key: grid index (element 1)
  if(pa[1] != pb[1]) { return (pa[1] > pb[1]) ? 1 : -1; }
  // secondary key: atom index (element 0)
  if(pa[0] != pb[0]) { return (pa[0] > pb[0]) ? 1 : -1; }
  return 0;
}
// construct the grid structure for a list of atoms and a box width
// RETURN: grid structure with allocated memory (caller must free to_atom & atom_index)
struct grid construct_grid(int natom, // number of atoms
                           double *atom, // atomic coordinates [3*natom]
                           double width) // box width that defines the uniform grid of boxes
{
  struct grid partition;
  // define the grid coordinates
  for(int i=0 ; i<3 ; i++)
  {
    // bounding box of the atoms along direction i
    double xmin = atom[i], xmax = atom[i];
    for(int j=1 ; j<natom ; j++)
    {
      if(atom[i+j*3] < xmin) { xmin = atom[i+j*3]; }
      if(atom[i+j*3] > xmax) { xmax = atom[i+j*3]; }
    }
    partition.dx[i] = width; // uniform boxes
    partition.nx[i] = (int)ceil((xmax - xmin)/width) + 1; // pad to prevent atoms near grid boundaries
    partition.x0[i] = 0.5*(xmin + xmax - partition.nx[i]*width); // center the padded grid on the atoms
  }
  // memory allocation
  // NOTE(review): malloc results are not checked for NULL
  int ngrid = partition.nx[0]*partition.nx[1]*partition.nx[2];
  int *sort_list = (int*)malloc(sizeof(int)*2*natom); // pairs of (atom index, grid index)
  partition.atom_index = (int*)malloc(sizeof(int)*natom); // not locally deallocated
  partition.to_atom = (int*)malloc(sizeof(int)*(ngrid+1)); // not locally deallocated
  // assign each atom to a box in the grid
  for(int i=0 ; i<natom ; i++)
  {
    sort_list[2*i] = i;
    int box[3];
    box_index(&(atom[3*i]),&partition,box);
    sort_list[1+2*i] = grid_index(box,&partition);
  }
  // sort atoms by box (list_compare orders by grid index first, atom index second)
  qsort(sort_list,natom,sizeof(int)*2,list_compare);
  // move sorted list into atom_index & construct to_atom
  for(int i=0 ; i<ngrid ; i++)
  { partition.to_atom[i] = natom + 1; } // (natom + 1) indicates that a box has not been set yet
  partition.to_atom[ngrid] = natom; // last entry is the number of atoms
  for(int i=0 ; i<natom ; i++)
  {
    partition.atom_index[i] = sort_list[2*i];
    // record the offset of the first atom belonging to each box
    if(partition.to_atom[sort_list[1+2*i]] == (natom + 1))
    { partition.to_atom[sort_list[1+2*i]] = i; }
  }
  // backfill empty boxes (sweeping right-to-left) so that every box b satisfies
  // to_atom[b] <= to_atom[b+1] & its atom count is to_atom[b+1] - to_atom[b]
  for(int i=ngrid ; i>=1 ; i--)
  {
    if(partition.to_atom[i-1] == (natom + 1))
    { partition.to_atom[i-1] = partition.to_atom[i]; }
  }
  // memory deallocation
  free(sort_list);
  return partition;
}
// comparison function for sorting neighbor lists (plain ints) with qsort from stdlib.h
// RETURN: 1 if a goes after b, -1 if a goes before b, 0 if they are equal
int neighbor_compare(const void *a, const void *b)
{
  int ia = *(const int*)a;
  int ib = *(const int*)b;
  if(ia == ib) { return 0; }
  return (ia > ib) ? 1 : -1;
}
// create a list of neighboring atoms for each atom (including self) as a sparsity pattern in CRS format
// NOTE: neighbor->col holds the natom+1 per-atom offsets & neighbor->row holds the
//       neighbor (column) indices; two passes over the grid are made — the 1st sizes
//       the CRS arrays, the 2nd fills them
void neighbor_list(int natom, // number of atoms
                   double *atom, // atomic coordinates [3*natom]
                   double radius, // cutoff radius used to define the neighbor list
                   struct pattern *neighbor) // neighbor list defined by matrix sparsity pattern (no matrix elements) [1]
{
  // determine a minimum radius value to avoid memory problems
  double xmin[3], xmax[3];
  for(int i=0 ; i<3 ; i++)
  {
    xmin[i] = xmax[i] = atom[i];
    for(int j=1 ; j<natom ; j++)
    {
      if(atom[i+j*3] < xmin[i]) { xmin[i] = atom[i+j*3]; }
      if(atom[i+j*3] > xmax[i]) { xmax[i] = atom[i+j*3]; }
    }
  }
  // lower bound on the box width derived from the bounding-box volume per atom & NBLOCK_MAX
  double radius0 = pow((xmax[0] - xmin[0])*(xmax[1] - xmin[1])*(xmax[2] - xmin[2])/(double)(natom*pow(NBLOCK_MAX,2)),1.0/3.0);
  // create a grid with allocated memory
  struct grid partition;
  if(radius > radius0) { partition = construct_grid(natom,atom,radius); }
  else { partition = construct_grid(natom,atom,radius0); }
  // allocate column list in neighbor matrix to store # of nearest neighbors
  neighbor->ncol = neighbor->nrow = natom;
  neighbor->col = (int*)malloc(sizeof(int)*(natom+1));
  neighbor->col[0] = 0;
  // perform work 1 box at a time (1st pass to count neighbors into neighbor->col)
  for(int i=0 ; i<partition.nx[0] ; i++)
  for(int j=0 ; j<partition.nx[1] ; j++)
  for(int k=0 ; k<partition.nx[2] ; k++)
  {
    int box1[3] = { i, j, k };
    // range of neighboring boxes (3x3x3 stencil clamped to the grid boundary)
    int xmin = 0, ymin = 0, zmin = 0, xmax = partition.nx[0]-1, ymax = partition.nx[1]-1, zmax = partition.nx[2]-1;
    if(i > 0) { xmin = i-1; }
    if(j > 0) { ymin = j-1; }
    if(k > 0) { zmin = k-1; }
    if(i < partition.nx[0]-1) { xmax = i+1; }
    if(j < partition.nx[1]-1) { ymax = j+1; }
    if(k < partition.nx[2]-1) { zmax = k+1; }
    // find neighbors for each atom in the box
    int iatom_min = partition.to_atom[grid_index(box1,&partition)];
    int iatom_max = partition.to_atom[grid_index(box1,&partition)+1];
    for(int iatom=iatom_min; iatom<iatom_max ; iatom++)
    {
      neighbor->col[partition.atom_index[iatom]+1] = 0;
      // count the neighbors (a distance of 0 to itself passes the test, so self is included)
      for(int x=xmin ; x<=xmax ; x++)
      for(int y=ymin ; y<=ymax ; y++)
      for(int z=zmin ; z<=zmax ; z++)
      {
        int box2[3] = { x, y, z };
        int jatom_min = partition.to_atom[grid_index(box2,&partition)];
        int jatom_max = partition.to_atom[grid_index(box2,&partition)+1];
        for(int jatom=jatom_min; jatom<jatom_max ; jatom++)
        {
          if(distance(&(atom[3*partition.atom_index[iatom]]),&(atom[3*partition.atom_index[jatom]])) <= radius)
          { neighbor->col[partition.atom_index[iatom]+1]++; }
        }
      }
    }
  }
  // convert from # of neighbors to column offsets (exclusive prefix sum)
  for(int i=0 ; i<natom ; i++)
  { neighbor->col[i+1] += neighbor->col[i]; }
  neighbor->row = (int*)malloc(sizeof(int)*neighbor->col[neighbor->ncol]);
  // perform work 1 box at a time (2nd pass to assign neighbors into neighbor->row)
  for(int i=0 ; i<partition.nx[0] ; i++)
  for(int j=0 ; j<partition.nx[1] ; j++)
  for(int k=0 ; k<partition.nx[2] ; k++)
  {
    int box1[3] = { i, j, k };
    // range of neighboring boxes (same clamped stencil as the 1st pass)
    int xmin = 0, ymin = 0, zmin = 0, xmax = partition.nx[0]-1, ymax = partition.nx[1]-1, zmax = partition.nx[2]-1;
    if(i > 0) { xmin = i-1; }
    if(j > 0) { ymin = j-1; }
    if(k > 0) { zmin = k-1; }
    if(i < partition.nx[0]-1) { xmax = i+1; }
    if(j < partition.nx[1]-1) { ymax = j+1; }
    if(k < partition.nx[2]-1) { zmax = k+1; }
    // find neighbors for each atom in the box
    int iatom_min = partition.to_atom[grid_index(box1,&partition)];
    int iatom_max = partition.to_atom[grid_index(box1,&partition)+1];
    for(int iatom=iatom_min; iatom<iatom_max ; iatom++)
    {
      // store the neighbors
      int ineighbor = 0;
      for(int x=xmin ; x<=xmax ; x++)
      for(int y=ymin ; y<=ymax ; y++)
      for(int z=zmin ; z<=zmax ; z++)
      {
        int box2[3] = { x, y, z };
        int jatom_min = partition.to_atom[grid_index(box2,&partition)];
        int jatom_max = partition.to_atom[grid_index(box2,&partition)+1];
        for(int jatom=jatom_min; jatom<jatom_max ; jatom++)
        {
          if(distance(&(atom[3*partition.atom_index[iatom]]),&(atom[3*partition.atom_index[jatom]])) <= radius)
          { neighbor->row[neighbor->col[partition.atom_index[iatom]]+(ineighbor++)] = partition.atom_index[jatom]; }
        }
      }
    }
  }
  // order the neighbor lists by atomic index
  for(int i=0 ; i<natom ; i++)
  { qsort(&(neighbor->row[neighbor->col[i]]),neighbor->col[i+1]-neighbor->col[i],sizeof(int),neighbor_compare); }
  // free memory used by the grid
  free(partition.to_atom);
  free(partition.atom_index);
}
// Welsh-Powell greedy graph coloring algorithm (ncolor <= maximum vertex degree + 1)
// NOTE(review): vertices are visited in the qsort order produced by neighbor_compare
// (defined elsewhere in this file) on (degree, index) pairs — presumably ascending;
// classic Welsh-Powell uses *descending* degree, which can lower the color count. Confirm.
void color_graph(struct pattern *graph, // adjacency matrix of graph, assumed symmetric [1]
                 int *ncolor, // number of colors used to color the graph [1]
                 int **color, // index of the first entry of each color [1]
                 int **vertex_ptr) // index of vertices, sorted by color [1]
{
  // create & sort a list of vertex degrees; each entry is a (degree, vertex index) pair of 2 ints
  int *degree = (int*)malloc(sizeof(int)*2*graph->ncol);
  for(int i=0 ; i<graph->ncol ; i++)
  {
    degree[2*i] = graph->col[i+1]-graph->col[i]; // degree of vertex
    degree[2*i+1] = i; // index of vertex
  }
  qsort(degree,graph->ncol,2*sizeof(int),neighbor_compare);
  // temporary inverse list of vertex colors; -1 marks "not yet colored"
  *ncolor = 1; // NOTE: overwritten below before first use
  int *vertex_color = (int*)malloc(sizeof(int)*graph->ncol);
  for(int i=0 ; i<graph->ncol ; i++)
  { vertex_color[i] = -1; }
  // color using the inverse list: one pass per color, pruning colored vertices from degree[]
  int num_uncolored = graph->ncol;
  *ncolor = 0;
  while(num_uncolored > 0)
  {
    // loop over uncolored vertices
    int offset = 0; // pruning offset: # of vertices colored so far in this pass
    for(int i=0 ; i<num_uncolored ; i++)
    {
      // copy degree list w/ pruning offset (compacts the surviving uncolored vertices in place;
      // a vertex colored below is overwritten by the next surviving entry)
      degree[2*(i-offset)] = degree[2*i];
      degree[2*(i-offset)+1] = degree[2*i+1];
      // check if it is connected to a vertex of the active color
      int collision = 0;
      for(int j=graph->col[degree[2*i+1]] ; j<graph->col[degree[2*i+1]+1] ; j++)
      { if(vertex_color[graph->row[j]] == *ncolor) { collision = 1; } }
      // if it isn't connected, color & offset for pruning
      if(collision == 0)
      {
        vertex_color[degree[2*i+1]] = *ncolor;
        offset++;
      }
    }
    num_uncolored -= offset;
    (*ncolor)++;
  }
  // allocate memory for the coloring
  *color = (int*)malloc(sizeof(int)*(*ncolor+1));
  *vertex_ptr = (int*)malloc(sizeof(int)*graph->ncol);
  // count the number of vertices per color & properly offset (prefix sum)
  for(int i=0 ; i<=*ncolor ; i++) { (*color)[i] = 0; }
  for(int i=0 ; i<graph->ncol ; i++) { ((*color)[vertex_color[i]+1])++; }
  for(int i=0 ; i<*ncolor ; i++) { (*color)[i+1] += (*color)[i]; }
  // invert the vertex color list (this pass advances (*color)[c], consuming the offsets)
  for(int i=0 ; i<graph->ncol ; i++)
  { (*vertex_ptr)[((*color)[vertex_color[i]])++] = i; }
  // re-count the number of vertices per color & properly offset (restores the offsets consumed above)
  for(int i=0 ; i<=*ncolor ; i++) { (*color)[i] = 0; }
  for(int i=0 ; i<graph->ncol ; i++) { ((*color)[vertex_color[i]+1])++; }
  for(int i=0 ; i<*ncolor ; i++) { (*color)[i+1] += (*color)[i]; }
  // deallocate temporary memory
  free(vertex_color);
  free(degree);
}
// comparison function for sorting lattice vector lists using the C qsort function in stdlib.h
// Lexicographic comparison of 3-component keys; each component is interpreted as an
// unsigned int (so negative int components sort after positive ones, by design of the
// original ordering convention).
// RETURN: 1 if a goes after b, -1 if a goes before b, 0 if they are equal
int latvec_compare(const void *a, const void *b)
{
  const unsigned int *lhs = (const unsigned int*)a;
  const unsigned int *rhs = (const unsigned int*)b;
  for(int i=0 ; i<3 ; i++)
  {
    if(lhs[i] > rhs[i]) { return 1; }
    if(lhs[i] < rhs[i]) { return -1; }
  }
  return 0;
}
// calculate the volume of a unit cell whose lattice vectors are defined by 4 atomic coordinates (1st is central atom)
// RETURN: |a . (b x c)| — the absolute value of the scalar triple product of the lattice vectors
double cell_volume(double *atom) // list of atomic coordinates [12]
{
  // lattice vectors: displacements of atoms 1..3 from the central atom 0
  double a[3], b[3], c[3];
  for(int i=0 ; i<3 ; i++)
  {
    a[i] = atom[i+3] - atom[i];
    b[i] = atom[i+6] - atom[i];
    c[i] = atom[i+9] - atom[i];
  }
  // cross product b x c, then dot with a
  double cross0 = b[1]*c[2] - b[2]*c[1];
  double cross1 = b[2]*c[0] - b[0]*c[2];
  double cross2 = b[0]*c[1] - b[1]*c[0];
  return fabs(a[0]*cross0 + a[1]*cross1 + a[2]*cross2);
}
// construct a list of lattice vectors within a localization radius
// RETURN: number of lattice vectors (nlatvec)
// NOTE: on input *atom must hold the 4 atomic coordinates defining the cell
// (central atom + 3 lattice-vector endpoints); it is freed & reallocated to hold
// the coordinates matching the returned lattice-vector list.
int latvec_list(double local_radius, // localization radius for truncation
                int **list, // (allocated) list of lattice vectors on output [1][3*nlatvec]
                double **atom) // (allocated) equivalent list of atomic coordinates [1][3*nlatvec]
{
  // store lattice vectors
  double latvec[9];
  for(int i=0 ; i<3 ; i++)
  for(int j=0 ; j<3 ; j++)
  { latvec[j+i*3] = (*atom)[j+(1+i)*3] - (*atom)[j]; }
  free(*atom);
  // identify lattice vector bounds: ceil(radius / |lattice vector|) in each direction
  // BUG FIX: max_index[] was previously read while uninitialized (via MAX(max_index[i],...)),
  // which is undefined behavior; the bound is simply assigned directly here.
  // NOTE(review): a zero-length lattice vector would divide by zero — assumed non-degenerate.
  int max_index[3];
  for(int i=0 ; i<3 ; i++)
  {
    double length = sqrt(latvec[3*i]*latvec[3*i] + latvec[3*i+1]*latvec[3*i+1] + latvec[3*i+2]*latvec[3*i+2]);
    max_index[i] = (int)ceil(local_radius/length);
  }
  // count the number of active lattice vectors (1st pass)
  int nlist = 0;
  double atom0[3];
  for(int i=-max_index[0] ; i<=max_index[0] ; i++)
  for(int j=-max_index[1] ; j<=max_index[1] ; j++)
  for(int k=-max_index[2] ; k<=max_index[2] ; k++)
  {
    for(int l=0 ; l<3 ; l++)
    { atom0[l] = i*latvec[l] + j*latvec[l+3] + k*latvec[l+6]; }
    if(sqrt(atom0[0]*atom0[0]+atom0[1]*atom0[1]+atom0[2]*atom0[2]) <= local_radius) { nlist++; }
  }
  // assign the active lattice vectors (2nd pass)
  *list = (int*)malloc(sizeof(int)*3*nlist);
  nlist = 0;
  for(int i=-max_index[0] ; i<=max_index[0] ; i++)
  for(int j=-max_index[1] ; j<=max_index[1] ; j++)
  for(int k=-max_index[2] ; k<=max_index[2] ; k++)
  {
    for(int l=0 ; l<3 ; l++)
    { atom0[l] = i*latvec[l] + j*latvec[l+3] + k*latvec[l+6]; }
    if(sqrt(atom0[0]*atom0[0]+atom0[1]*atom0[1]+atom0[2]*atom0[2]) <= local_radius)
    { (*list)[3*nlist] = i; (*list)[1+3*nlist] = j; (*list)[2+3*nlist] = k; nlist++; }
  }
  // sort the active lattice vectors
  qsort(*list,nlist,3*sizeof(int),latvec_compare);
  // construct the sorted atom list
  *atom = (double*)malloc(sizeof(double)*3*nlist);
  for(int i=0 ; i<nlist ; i++)
  {
    for(int j=0 ; j<3 ; j++)
    { (*atom)[j+3*i] = (*list)[3*i]*latvec[j] + (*list)[1+3*i]*latvec[j+3] + (*list)[2+3*i]*latvec[j+6]; }
  }
  return nlist;
}
//=====================================//
// 4. BLOCK VECTOR & MATRIX OPERATIONS //
//=====================================//
// zero the entries of a block vector
void zero_vec(int nblock, // block size
              int nvec, // dimension of vector (# of blocks)
              double *vec) // vector elements [nblock*nblock*nvec]
{
  const int total = nblock*nblock*nvec;
  #pragma omp parallel for
  for(int idx=0 ; idx<total ; idx++)
  {
    vec[idx] = 0.0;
  }
}
// zero the entries of a block-sparse matrix
void zero_mat(int nblock, // block size
              struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
              double **mat) // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  const int nnz = sparsity->col[sparsity->ncol]; // total # of nonzero blocks
  const int blocksize = nblock*nblock;
  #pragma omp parallel for collapse(2)
  for(int iblk=0 ; iblk<nnz ; iblk++)
  for(int ielem=0 ; ielem<blocksize ; ielem++)
  { mat[iblk][ielem] = 0.0; }
}
// copy a block vector
void copy_vec(int nblock, // block size
              int nvec, // dimension of vectors (# of blocks)
              double *src, // source vector [nblock*nblock*nvec]
              double *dst) // destination vector [nblock*nblock*nvec]
{
  const int total = nblock*nblock*nvec;
  #pragma omp parallel for
  for(int idx=0 ; idx<total ; idx++)
  {
    dst[idx] = src[idx];
  }
}
// copy a block-sparse matrix
void copy_mat(int nblock, // block size
              struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
              double **src, // matrix elements of the source sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
              double **dst) // matrix elements of the target sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  const int nnz = sparsity->col[sparsity->ncol]; // total # of nonzero blocks
  const int blocksize = nblock*nblock;
  #pragma omp parallel for collapse(2)
  for(int iblk=0 ; iblk<nnz ; iblk++)
  for(int ielem=0 ; ielem<blocksize ; ielem++)
  { dst[iblk][ielem] = src[iblk][ielem]; }
}
// rescale a block vector: dst = alpha*dst
// NOTE: each column has a different weight
void scale_vec(int nblock, // block size
               int nvec, // dimension of vectors (# of blocks)
               double *alpha, // scale factors [nblock]
               double *vec) // vector elements [nblock*nblock*nvec]
{
  // column icol of every block is scaled by alpha[icol]
  #pragma omp parallel for collapse(3)
  for(int ivec=0 ; ivec<nvec ; ivec++)
  for(int icol=0 ; icol<nblock ; icol++)
  for(int irow=0 ; irow<nblock ; irow++)
  { vec[irow+(icol+ivec*nblock)*nblock] *= alpha[icol]; }
}
// rescale a block-sparse matrix: dst = alpha*dst
void scale_mat(int nblock, // block size
               struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
               double alpha,
               double **mat) // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  const int nnz = sparsity->col[sparsity->ncol]; // total # of nonzero blocks
  const int blocksize = nblock*nblock;
  #pragma omp parallel for collapse(2)
  for(int iblk=0 ; iblk<nnz ; iblk++)
  for(int ielem=0 ; ielem<blocksize ; ielem++)
  { mat[iblk][ielem] *= alpha; }
}
// add two block vectors in BLAS ?AXPY form: dst = alpha*src + dst
// NOTE: each column has a different weight
void add_vec(int nblock, // block size
             int nvec, // dimension of vectors (# of blocks)
             double *alpha, // scale factors on src [nblock]
             double *src, // source vector [nblock*nblock*nvec]
             double *dst) // destination vector [nblock*nblock*nvec]
{
  // column icol of every source block is weighted by alpha[icol] before accumulation
  #pragma omp parallel for collapse(3)
  for(int ivec=0 ; ivec<nvec ; ivec++)
  for(int icol=0 ; icol<nblock ; icol++)
  for(int irow=0 ; irow<nblock ; irow++)
  { dst[irow+(icol+ivec*nblock)*nblock] += alpha[icol]*src[irow+(icol+ivec*nblock)*nblock]; }
}
// add two block-sparse matrices in BLAS ?AXPY form: dst = alpha*src + dst
void add_mat(int nblock, // block size
             struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
             double alpha,
             double **src, // matrix elements of the source sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
             double **dst) // matrix elements of the target sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  const int nnz = sparsity->col[sparsity->ncol]; // total # of nonzero blocks
  const int blocksize = nblock*nblock;
  #pragma omp parallel for collapse(2)
  for(int iblk=0 ; iblk<nnz ; iblk++)
  for(int ielem=0 ; ielem<blocksize ; ielem++)
  { dst[iblk][ielem] += alpha*src[iblk][ielem]; }
}
// inner products between columns of two block vectors
// dot[j] accumulates sum over all blocks i and rows k of vec1[k][j]*vec2[k][j],
// i.e. one dot product per block column.
void dot_vec(int nblock, // block size
             int nvec, // dimension of vectors (# of blocks)
             double *vec1, // source vector [nblock*nblock*nvec]
             double *vec2, // destination vector [nblock*nblock*nvec]
             double *dot) // accumulated dot products on output [nblock]
{
  for(int i=0 ; i<nblock ; i++) { dot[i] = 0.0; }
  #pragma omp parallel
  // begin openmp block
  {
    // per-thread partial sums; assumes nblock <= NBLOCK_MAX — TODO confirm callers enforce this
    double local_dot[NBLOCK_MAX];
    for(int i=0 ; i<nblock ; i++) { local_dot[i] = 0.0; }
    #pragma omp for collapse(3)
    for(int i=0 ; i<nvec ; i++)
    for(int j=0 ; j<nblock ; j++)
    for(int k=0 ; k<nblock ; k++)
    { local_dot[j] += vec1[k+(j+i*nblock)*nblock]*vec2[k+(j+i*nblock)*nblock]; }
    // merge per-thread partial sums; atomic update avoids a race on the shared dot[]
    for(int i=0 ; i<nblock ; i++)
    {
      #pragma omp atomic
      dot[i] += local_dot[i];
    }
  }
  // end openmp block
}
// inner product (trace) between two sparse matrices of the same pattern
// RETURN: value of the inner product
double dot_mat(int nblock, // block size
               struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
               double **mat1, // matrix elements of the source sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
               double **mat2) // matrix elements of the target sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  double sum = 0.0;
  const int nnz = sparsity->col[sparsity->ncol]; // total # of nonzero blocks
  const int blocksize = nblock*nblock;
  #pragma omp parallel for collapse(2) reduction(+:sum)
  for(int iblk=0 ; iblk<nnz ; iblk++)
  for(int ielem=0 ; ielem<blocksize ; ielem++)
  { sum += mat1[iblk][ielem]*mat2[iblk][ielem]; }
  return sum;
}
// block-sparse matrix-vector multiplication in BLAS ?GEMV form, vec_out = alpha*mat^T*vec_in + beta*vec_out
// Each column i of the sparsity pattern owns exactly one nblock*nblock output block,
// so the outer loop parallelizes without write conflicts.
void mat_vec(int nblock, // matrix & vector block size
             struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
             double alpha, // scale factor on vec_in*mat
             double beta, // scale factor on vec_out
             double **mat, // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
             double *vec_in, // input block vector [sparsity->nrow*nblock*nblock]
             double *vec_out) // output block vector [sparsity->ncol*nblock*nblock]
{
  // loop over block entries of vec_out
  #pragma omp parallel for
  for(int i=0 ; i<sparsity->ncol ; i++)
  {
    // rescale entries of vec_out by beta up front, so the dgemm calls below can accumulate (their beta is 1)
    for(int j=0 ; j<nblock*nblock ; j++) { vec_out[j+i*nblock*nblock] *= beta; }
    // loop over nonzero blocks in the column
    for(int j=sparsity->col[i] ; j<sparsity->col[i+1] ; j++)
    {
      // accumulate blocks of the solution (BLAS call): vec_out_i += alpha * mat_j^T * vec_in_{row(j)}
      char transa = 'T', transb = 'N';
      double one = 1.0;
      MKL_INT n = nblock;
      dgemm(&transa,&transb,&n,&n,&n,&alpha,mat[j],&n,&(vec_in[sparsity->row[j]*nblock*nblock]),
            &n,&one,&(vec_out[i*nblock*nblock]),&n);
    }
  }
}
// complex wrapper for vec_out = (mat_base + shift*mat_shift)^T*vec_in
// Complex vectors are stored as split real/imaginary halves: the real parts occupy
// the first nvec entries and the imaginary parts the next nvec entries.
void zmat_zvec(int nblock, // matrix & vector block size
               struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
               double complex shift, // complex shift applied to mat2
               double **mat_base, // base matrix in mat-vec operation [sparsity->col[sparsity->ncol]][nblock*nblock]
               double **mat_shift, // shifted matrix in mat-vec operation [sparsity->col[sparsity->ncol]][nblock*nblock]
               double *vec_in, // input block vector, contiguous real & imaginary parts [2*sparsity->nrow*nblock*nblock]
               double *vec_out) // output block vector, contiguous real & imaginary parts [2*sparsity->ncol*nblock*nblock]
{
  int nvec_in = sparsity->nrow*nblock*nblock;
  int nvec_out = sparsity->ncol*nblock*nblock;
  // vec_out = mat_shift^T*vec_in (real & imaginary halves independently; mat_shift is real)
  mat_vec(nblock,sparsity,1.0,0.0,mat_shift,vec_in,vec_out);
  mat_vec(nblock,sparsity,1.0,0.0,mat_shift,&(vec_in[nvec_in]),&(vec_out[nvec_out]));
  // vec_out <- shift*vec_out (complex scaling mixes the real & imaginary halves)
  #pragma omp parallel for
  for(int i=0 ; i<nvec_out ; i++)
  {
    double complex work = shift*(vec_out[i] + I*vec_out[i+nvec_out]);
    vec_out[i] = creal(work);
    vec_out[i+nvec_out] = cimag(work);
  }
  // include the base part of the matrix: vec_out <- vec_out + mat_base^T*vec_in
  mat_vec(nblock,sparsity,1.0,1.0,mat_base,vec_in,vec_out);
  mat_vec(nblock,sparsity,1.0,1.0,mat_base,&(vec_in[nvec_in]),&(vec_out[nvec_out]));
}
// add a block vector to the column of a block-sparse matrix within its sparsity pattern
void add_col(int nblock, // matrix & vector block size
             struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
             int icol, // column index to update
             double wt, // weight to add the vector with
             double *vec, // dense block vector to add [sparsity->nrow*nblock*nblock]
             double **mat) // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  const int blocksize = nblock*nblock;
  // loop over the nonzero blocks of column icol, pulling the matching block of vec
  #pragma omp parallel for collapse(2)
  for(int inz=sparsity->col[icol] ; inz<sparsity->col[icol+1] ; inz++)
  for(int ielem=0 ; ielem<blocksize ; ielem++)
  { mat[inz][ielem] += wt*vec[ielem+sparsity->row[inz]*blocksize]; }
}
// comparison function for finding rows using the C bsearch function in stdlib.h
// RETURN: 1 if a goes after b, -1 if a goes before b, 0 if they are equal
int row_compare(const void *a, const void *b)
{
  int lhs = *(const int*)a;
  int rhs = *(const int*)b;
  // (lhs > rhs) - (lhs < rhs) yields exactly 1, -1, or 0
  return (lhs > rhs) - (lhs < rhs);
}
// add a block vector to the row of a block-sparse matrix within its sparsity pattern
// NOTE: the loop over all columns could be restricted with the promise of a symmetric sparsity pattern
// NOTE(review): bsearch requires the row indices within each column to be sorted
// ascending — assumed true of patterns built in this file; confirm for other producers.
void add_row(int nblock, // matrix & vector block size
             struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
             int irow, // row index to update
             double wt, // weight to add the vector with
             double *vec, // dense block vector to add [sparsity->ncol*nblock*nblock]
             double **mat) // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  #pragma omp parallel for
  for(int i=0 ; i<sparsity->ncol ; i++)
  {
    // search for the row index inside the column
    int nnz_col = sparsity->col[i+1] - sparsity->col[i];
    int *row_ptr = (int*)bsearch(&irow,&(sparsity->row[sparsity->col[i]]),nnz_col,sizeof(int),row_compare);
    // add the matrix element block
    if(row_ptr != NULL)
    {
      int ielem = (int)(row_ptr - sparsity->row); // pointer arithmetic recovers the global nonzero index
      // block of vec for column i is added *transposed* into the matrix block
      for(int j=0 ; j<nblock ; j++)
      for(int k=0 ; k<nblock ; k++)
      { mat[ielem][j+k*nblock] += wt*vec[k+(j+i*nblock)*nblock]; }
    }
  }
}
//=====================================//
// 5. MATRIX CONSTRUCTION & CONVERSION //
//=====================================//
// block-sparse construction of hamiltonian & overlap (each block is a 9-by-9 atomic subspace)
// Diagonal blocks (i == row) depend on the whole neighbor list of atom i; off-diagonal
// blocks depend only on the atom pair. tb_diagonal/tb_offdiagonal are defined elsewhere
// in this file.
void tb_matrix(int natom, // number of atoms
               double *atom, // atomic coordinates [3*natom]
               struct nrl_tb *param, // tight-binding parameters [1]
               struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
               double **hamiltonian, // Hamiltonian matrix elements [sparsity->col[sparsity->ncol]][nblock*nblock]
               double **overlap) // overlap matrix elements [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // fill in the sparse matrix; each column's blocks are written by exactly one thread
  #pragma omp parallel for
  for(int i=0 ; i<sparsity->ncol ; i++)
  {
    for(int j=sparsity->col[i] ; j<sparsity->col[i+1] ; j++)
    {
      // calculate block matrix elements
      if(i == sparsity->row[j])
      {
        // diagonal block: needs the list of neighbors of atom i (the nonzero rows of column i)
        tb_diagonal(i,natom,atom,sparsity->col[i+1]-sparsity->col[i],&(sparsity->row[sparsity->col[i]]),
                    param,hamiltonian[j],overlap[j]);
      }
      else
      { tb_offdiagonal(i,sparsity->row[j],natom,atom,param,hamiltonian[j],overlap[j]); }
    }
  }
}
// block-sparse to dense matrix embedding
// NOTE: some arrays here can be larger than the maximum value of "int" and need "size_t" indices
// The dense matrix is stored column-major with leading dimension nrow = sparsity->nrow*nblock.
void embed_mat(int nblock, // matrix block size
               struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
               double **smat, // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
               double *dmat) // dense matrix [sparsity->nrow*sparsity->ncol*nblock*nblock]
{
  // fill dense matrix with zeros (nrow is size_t, so the products below are done in size_t)
  size_t nrow = sparsity->nrow*nblock, ndata = nrow*sparsity->ncol*nblock;
  #pragma omp parallel for
  for(size_t i=0 ; i<ndata ; i++)
  { dmat[i] = 0.0; }
  // block-by-block transfer of block-sparse matrix
  #pragma omp parallel for
  for(size_t i=0 ; i<sparsity->ncol ; i++)
  for(size_t j=sparsity->col[i] ; j<sparsity->col[i+1] ; j++)
  {
    // copy a block: column k of block (row(j), i) lands at dense column k+i*nblock, rows starting at row(j)*nblock
    for(size_t k=0 ; k<nblock ; k++)
    for(size_t l=0 ; l<nblock ; l++)
    { dmat[(l+sparsity->row[j]*nblock)+(k+i*nblock)*nrow] = smat[j][l+k*nblock]; }
  }
}
// block-sparse matrix restriction of a block vector outer product (accumulate solution)
// For each nonzero block (row(j), i): mat[j] += leftvec_block(row(j)) * rightvec_block(i)^T
// NOTE(review): if mat[] blocks alias one another (as arranged by crystal_malloc), the
// parallel accumulation below could race on shared storage — confirm callers' usage.
void restrict_outvec(int nblock, // matrix & vector block size
                     struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
                     double *leftvec, // left vector [sparsity->nrow*nblock*nblock]
                     double *rightvec, // right vector [sparsity->ncol*nblock*nblock]
                     double **mat) // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // loop over nonzero blocks of the sparsity matrix
  #pragma omp parallel for
  for(int i=0 ; i<sparsity->ncol ; i++)
  for(int j=sparsity->col[i] ; j<sparsity->col[i+1] ; j++)
  {
    // calculate block outer product between leftmat & rightmat (BLAS call, beta = 1 accumulates)
    char transa = 'N', transb = 'T';
    double one = 1.0;
    MKL_INT n = nblock;
    dgemm(&transa,&transb,&n,&n,&n,&one,&(leftvec[sparsity->row[j]*nblock*nblock]),&n,
          &(rightvec[i*nblock*nblock]),&n,&one,mat[j],&n);
  }
}
// block-sparse matrix restriction of a matrix outer product (accumulate solution)
// leftmat/rightmat are dense, column-major, with leading dimensions nrow*nblock and
// ncol*nblock respectively; each nonzero block gets leftrows * rightrows^T summed over
// the nouter inner dimension.
// NOTE(review): this loop is serial (no omp pragma), unlike restrict_outvec — possibly
// because mat[] blocks may alias (see crystal_malloc); confirm before parallelizing.
void restrict_outmat(int nblock, // matrix & vector block size
                     int nouter, // inner matrix dimension between left & right matrices
                     struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
                     double *leftmat, // left matrix [sparsity->nrow*nblock*nouter]
                     double *rightmat, // right matrix [sparsity->ncol*nblock*nouter]
                     double **mat) // matrix elements of the sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // loop over nonzero blocks of bmat
  for(int i=0 ; i<sparsity->ncol ; i++)
  for(int j=sparsity->col[i] ; j<sparsity->col[i+1] ; j++)
  {
    // calculate block outer product between leftmat & rightmat (BLAS call, beta = 1 accumulates)
    char transa = 'N', transb = 'T';
    double one = 1.0;
    MKL_INT n = nblock, m = nouter, lda = sparsity->nrow*nblock, ldb = sparsity->ncol*nblock;
    dgemm(&transa,&transb,&n,&n,&m,&one,&(leftmat[sparsity->row[j]*nblock]),&lda,
          &(rightmat[i*nblock]),&ldb,&one,mat[j],&n);
  }
}
// conversion between block-sparse & ordered-pair sparse matrix formats
// Each block column i expands into nblock fine columns; the two input matrices are
// interleaved as (bmat1, bmat2) value pairs in smat12. Allocates s_sparsity->col/row;
// the caller (or sparse2block) is responsible for freeing them.
void block2sparse(int nblock, // matrix block size
                  struct pattern *b_sparsity, // contains the sparsity pattern & dimensions of the block-sparse matrix [1]
                  struct pattern *s_sparsity, // contains the sparsity pattern & dimensions of the sparse matrix [1]
                  double **bmat1, // first input block matrix [sparsity_in->col[sparsity_in->ncol]][nblock*nblock]
                  double **bmat2, // second input block matrix [sparsity_in->col[sparsity_in->ncol]][nblock*nblock]
                  double *smat12) // output sparse matrix [2*nblock*nblock*sparsity_in->col[sparsity_in->ncol]]
{
  // allocate memory for the ordered-pair sparsity pattern
  s_sparsity->ncol = b_sparsity->ncol*nblock;
  s_sparsity->nrow = b_sparsity->nrow*nblock;
  s_sparsity->col = (int*)malloc(sizeof(int)*(s_sparsity->ncol+1));
  s_sparsity->row = (int*)malloc(sizeof(int)*nblock*nblock*b_sparsity->col[b_sparsity->ncol]);
  // loop over elements of the block-sparse matrix
  #pragma omp parallel for collapse(2)
  for(int i=0 ; i<b_sparsity->ncol ; i++)
  for(int j=0 ; j<nblock ; j++)
  {
    // offset of fine column (i,j): all blocks before column i, plus j full fine columns of this block column
    s_sparsity->col[j+i*nblock] = b_sparsity->col[i]*nblock*nblock + j*(b_sparsity->col[i+1] - b_sparsity->col[i])*nblock;
    for(int k=0 ; k<b_sparsity->col[i+1]-b_sparsity->col[i] ; k++)
    for(int l=0 ; l<nblock ; l++)
    {
      // fine row index, and the interleaved (bmat1, bmat2) value pair
      s_sparsity->row[l+k*nblock+s_sparsity->col[j+i*nblock]] = l + b_sparsity->row[b_sparsity->col[i]+k]*nblock;
      smat12[2*(l+k*nblock+s_sparsity->col[j+i*nblock])] = bmat1[b_sparsity->col[i]+k][l+j*nblock];
      smat12[2*(l+k*nblock+s_sparsity->col[j+i*nblock])+1] = bmat2[b_sparsity->col[i]+k][l+j*nblock];
    }
  }
  // terminal column offset = total # of fine nonzeros
  s_sparsity->col[s_sparsity->ncol] = b_sparsity->col[b_sparsity->ncol]*nblock*nblock;
}
// conversion between ordered-pair sparse & block-sparse matrix formats (reverse of previous operation)
// Reads the interleaved (bmat1, bmat2) value pairs back out of smat12 using the fine-column
// offsets built by block2sparse, then releases the ordered-pair pattern.
void sparse2block(int nblock, // matrix block size
                  struct pattern *s_sparsity, // contains the sparsity pattern & dimensions of the sparse matrix [1]
                  struct pattern *b_sparsity, // contains the sparsity pattern & dimensions of the block-sparse matrix [1]
                  double *smat12, // input sparse matrix [2*nblock*nblock*sparsity_out->col[sparsity_out->ncol]]
                  double **bmat1, // first output block matrix [sparsity_out->col[sparsity_out->ncol]][nblock*nblock]
                  double **bmat2) // second output block matrix [sparsity_out->col[sparsity_out->ncol]][nblock*nblock]
{
  // loop over elements of the block-sparse matrix
  #pragma omp parallel for collapse(2)
  for(int i=0 ; i<b_sparsity->ncol ; i++)
  for(int j=0 ; j<nblock ; j++)
  for(int k=0 ; k<b_sparsity->col[i+1]-b_sparsity->col[i] ; k++)
  for(int l=0 ; l<nblock ; l++)
  {
    bmat1[b_sparsity->col[i]+k][l+j*nblock] = smat12[2*(l+k*nblock+s_sparsity->col[j+i*nblock])];
    bmat2[b_sparsity->col[i]+k][l+j*nblock] = smat12[2*(l+k*nblock+s_sparsity->col[j+i*nblock])+1];
  }
  // free memory for the ordered-pair sparsity pattern (allocated by block2sparse)
  free_pattern(s_sparsity);
}
// add src to dst with different sparsity patterns: dst = alpha*src + dst
// Entries of src whose (row, column) position is absent from dst's pattern are
// silently dropped; rows are assumed sorted ascending within each column.
void sparse2sparse(int nblock, // matrix block size
                   struct pattern *sparsity_in, // contains the sparsity pattern of the input matrix [1]
                   struct pattern *sparsity_out, // contains the sparsity pattern of the output matrix [1]
                   double alpha, // coefficient in matrix addition
                   double **src, // input matrix [sparsity_in->col[sparsity_in->ncol]][nblock*nblock]
                   double **dst) // output matrix [sparsity_out->col[sparsity_out->ncol]][nblock*nblock]
{
  if(sparsity_in->nrow != sparsity_out->nrow || sparsity_in->ncol != sparsity_out->ncol)
  {
    printf("ERROR: sparse-to-sparse addition of matrices w/ incompatible dimension\n");
    MPI_Abort(MPI_COMM_WORLD,0);
  }
  // merge the sorted row lists column by column (removed an unused is_subset flag)
  for(int i=0 ; i<sparsity_in->ncol ; i++)
  {
    int k = sparsity_out->col[i];
    for(int j=sparsity_in->col[i] ; j<sparsity_in->col[i+1] ; j++)
    {
      // BUG FIX: the bounds check must come first — the old condition order read
      // sparsity_out->row[k] before checking k<sparsity_out->col[i+1], an out-of-bounds read
      while(k<sparsity_out->col[i+1] && sparsity_out->row[k] < sparsity_in->row[j])
      { k++; }
      if(k<sparsity_out->col[i+1] && sparsity_out->row[k] == sparsity_in->row[j])
      { add_vec(1,nblock*nblock,&alpha,src[j],dst[k++]); }
    }
  }
}
// distributes a sparsity pattern from mpirank == 0 to all the other MPI processes by splitting uniformly over columns
// RETURN: global number of nonzero matrix elements, g_sparsity->col[g_sparsity->ncol]
// Rank 0 reads g_sparsity and either keeps its own slice (i == 0) or sends slices to the
// other ranks; every rank allocates and fills l_sparsity. The matching Bcast/Send/Recv
// ordering on both branches is what makes the protocol deadlock-free.
int split_pattern(int mpirank, // rank of this MPI process
                  int mpisize, // total number of MPI processes
                  struct pattern *g_sparsity, // contains the sparsity pattern & dimensions of the global matrix [1]
                  struct pattern *l_sparsity) // contains the sparsity pattern & dimensions of the local matrix [1]
{
  int nnz; // set on rank 0 here; filled on other ranks by the final Bcast
  if(mpirank == 0)
  {
    nnz = g_sparsity->col[g_sparsity->ncol];
    MPI_Bcast(&(g_sparsity->nrow),1,MPI_INT,0,MPI_COMM_WORLD);
    for(int i=0 ; i<mpisize ; i++)
    {
      // uniform column split; the last MPI process absorbs the remainder
      int ncol_local = g_sparsity->ncol/mpisize, icol_head = i*ncol_local;
      if(i == mpisize-1) { ncol_local = g_sparsity->ncol - (mpisize-1)*ncol_local; } // last MPI process gets more columns
      int nnz_local = g_sparsity->col[icol_head+ncol_local] - g_sparsity->col[icol_head];
      int irow_head = g_sparsity->col[icol_head];
      if(i == 0)
      {
        // rank 0 keeps its own slice locally (no self-send)
        l_sparsity->ncol = ncol_local;
        l_sparsity->nrow = g_sparsity->nrow;
        l_sparsity->col = (int*)malloc(sizeof(int)*(l_sparsity->ncol+1));
        l_sparsity->row = (int*)malloc(sizeof(int)*nnz_local);
        for(int j=0 ; j<=l_sparsity->ncol ; j++)
        { l_sparsity->col[j] = g_sparsity->col[icol_head+j] - g_sparsity->col[icol_head]; }
        for(int j=0 ; j<nnz_local ; j++)
        { l_sparsity->row[j] = g_sparsity->row[irow_head+j]; }
      }
      else
      {
        // sizes first, then the column offsets & row indices of the slice
        MPI_Send(&ncol_local,1,MPI_INT,i,0,MPI_COMM_WORLD);
        MPI_Send(&nnz_local,1,MPI_INT,i,0,MPI_COMM_WORLD);
        MPI_Send(&(g_sparsity->col[icol_head]),ncol_local+1,MPI_INT,i,0,MPI_COMM_WORLD);
        MPI_Send(&(g_sparsity->row[irow_head]),nnz_local,MPI_INT,i,0,MPI_COMM_WORLD);
      }
    }
  }
  else
  {
    int nnz_local;
    MPI_Bcast(&(l_sparsity->nrow),1,MPI_INT,0,MPI_COMM_WORLD); // receives g_sparsity->nrow
    MPI_Recv(&(l_sparsity->ncol),1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    MPI_Recv(&nnz_local,1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    l_sparsity->col = (int*)malloc(sizeof(int)*(l_sparsity->ncol+1));
    l_sparsity->row = (int*)malloc(sizeof(int)*nnz_local);
    MPI_Recv(l_sparsity->col,l_sparsity->ncol+1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    MPI_Recv(l_sparsity->row,nnz_local,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
    // adjust the offsets in column indices (received offsets are global; make them local/zero-based)
    int offset = l_sparsity->col[0];
    for(int i=0 ; i<=l_sparsity->ncol ; i++)
    { l_sparsity->col[i] -= offset; }
  }
  // broadcast global number of nonzeros
  MPI_Bcast(&nnz,1,MPI_INT,0,MPI_COMM_WORLD);
  return nnz;
}
// build a sparsity pattern in the local restriction of one column of a localization pattern
// Both local[] and the row indices within each column of sparsity are assumed sorted
// ascending; the two sorted lists are intersected per column.
void localize_pattern(int nlocal, // number of local rows / columns
                      int *local, // ordered list of local rows / columns [nlocal]
                      struct pattern *sparsity, // sparsity pattern that is being restricted [1]
                      struct pattern *local_sparsity) // localized sparsity pattern [1]
{
  local_sparsity->ncol = local_sparsity->nrow = nlocal;
  local_sparsity->col = (int*)malloc(sizeof(int)*(nlocal+1));
  // count number of non-zero entries (1st pass)
  int local_inz = 0;
  local_sparsity->col[0] = 0;
  for(int i=0 ; i<nlocal ; i++)
  {
    int ilocal = 0;
    for(int j=sparsity->col[local[i]] ; j<sparsity->col[local[i]+1] ; j++)
    {
      // BUG FIX: bounds check must come first — the old condition order read
      // local[ilocal] at ilocal == nlocal (out-of-bounds read)
      while(ilocal < nlocal && local[ilocal] < sparsity->row[j]) { ilocal++; }
      if(ilocal == nlocal) { break; }
      if(local[ilocal] == sparsity->row[j]) { local_inz++; }
    }
    local_sparsity->col[i+1] = local_inz;
  }
  local_sparsity->row = (int*)malloc(sizeof(int)*local_sparsity->col[local_sparsity->ncol]);
  // fill in non-zero entries (2nd pass, same intersection)
  local_inz = 0;
  for(int i=0 ; i<nlocal ; i++)
  {
    int ilocal = 0;
    for(int j=sparsity->col[local[i]] ; j<sparsity->col[local[i]+1] ; j++)
    {
      while(ilocal < nlocal && local[ilocal] < sparsity->row[j]) { ilocal++; }
      if(ilocal == nlocal) { break; }
      if(local[ilocal] == sparsity->row[j]) { local_sparsity->row[local_inz++] = ilocal; }
    }
  }
}
// build a localized block-sparse matrix that points to the data of the original block-sparse matrix
// local_mat[] receives aliases (not copies) of the blocks of mat[] whose rows appear in
// local[]; both local[] and per-column row lists are assumed sorted ascending.
void localize_mat(int nlocal, // number of local rows / columns
                  int *local, // ordered list of local rows / columns [nlocal]
                  struct pattern *sparsity, // original sparsity pattern [1]
                  double **mat, // original sparse matrix [sparsity->ncol[sparsity[col]][*]
                  double **local_mat) // localized sparse matrix [sparsity->ncol[sparsity[col]][*]
{
  int local_inz = 0;
  for(int i=0 ; i<nlocal ; i++)
  {
    int ilocal = 0;
    for(int j=sparsity->col[local[i]] ; j<sparsity->col[local[i]+1] ; j++)
    {
      // BUG FIX: bounds check must come first — the old condition order read
      // local[ilocal] at ilocal == nlocal (out-of-bounds read)
      while(ilocal < nlocal && local[ilocal] < sparsity->row[j]) { ilocal++; }
      if(ilocal == nlocal) { break; }
      if(local[ilocal] == sparsity->row[j]) { local_mat[local_inz++] = mat[j]; }
    }
  }
}
// allocate memory for a periodic symmetric sparse matrix where all elements are pointers to the first row & column
// Only the blocks of the first column (and, by symmetry, the first row) get fresh storage;
// every other block in the pattern is aliased, by lattice-vector translation, onto one of
// those blocks. NOTE(review): the single malloc below is unchecked — confirm the caller's
// out-of-memory policy.
void crystal_malloc(int nblock, // size of matrix blocks
                    struct pattern *sparsity, // sparsity pattern of the matrix [1]
                    int *latvec, // ordered list of lattice vectors [3*sparsity->ncol]
                    double **mat) // matrix element pointers for the sparse matrix [sparsity->col[sparsity->ncol]][1]
{
  // allocate the non-redundant memory: all first-column blocks plus first-row blocks,
  // sharing the (0,0) block when it exists (elem00 == 1)
  int nnz = sparsity->col[1], elem00 = 1;
  if(sparsity->row[0] != 0) { elem00 = 0; }
  mat[0] = (double*)malloc(sizeof(double)*(2*nnz-elem00)*nblock*nblock);
  for(int i=1 ; i<nnz ; i++)
  { mat[i] = &(mat[i-1][nblock*nblock]); }
  // assign memory to the first row (blocks at column sparsity->row[i], carved from the same allocation)
  mat[sparsity->col[sparsity->row[elem00]]] = &(mat[nnz-1][nblock*nblock]);
  for(int i=1+elem00 ; i<nnz ; i++)
  { mat[sparsity->col[sparsity->row[i]]] = &(mat[sparsity->col[sparsity->row[i-1]]][nblock*nblock]); }
  // loop over columns, excluding the first: alias each remaining block onto a first-column/row block
  for(int i=1 ; i<sparsity->ncol ; i++)
  {
    int latvec0[3];
    for(int j=0 ; j<3 ; j++) { latvec0[j] = latvec[j+3*i]; }
    // loop over matrix elements in the first column
    for(int j=0 ; j<sparsity->col[1] ; j++)
    {
      // shift lattice vector of matrix element & search for it in the list
      int latvec1[3];
      for(int k=0 ; k<3 ; k++) { latvec1[k] = latvec0[k] + latvec[k+3*sparsity->row[j]]; }
      int *latvec_ptr = (int*)bsearch(latvec1,latvec,sparsity->ncol,3*sizeof(int),latvec_compare);
      // search for index in the sparsity pattern & assign the pointer
      if(latvec_ptr != NULL)
      {
        int irow = (int)(latvec_ptr - latvec)/3; // position of the matched lattice vector in the list
        int nnz_col = sparsity->col[i+1] - sparsity->col[i];
        int *row_ptr = (int*)bsearch(&irow,&(sparsity->row[sparsity->col[i]]),nnz_col,sizeof(int),row_compare);
        if(irow >= i && row_ptr != NULL) // irow >= i: upper part, alias onto the first-column block
        {
          int ielem = (int)(row_ptr - sparsity->row);
          mat[ielem] = mat[j];
        }
      }
      // search for conjugate matrix element (lattice vector subtracted instead of added)
      for(int k=0 ; k<3 ; k++) { latvec1[k] = latvec0[k] - latvec[k+3*sparsity->row[j]]; }
      latvec_ptr = (int*)bsearch(latvec1,latvec,sparsity->ncol,3*sizeof(int),latvec_compare);
      // search for index in the sparsity pattern & assign the pointer
      if(latvec_ptr != NULL)
      {
        int irow = (int)(latvec_ptr - latvec)/3;
        int nnz_col = sparsity->col[i+1] - sparsity->col[i];
        int *row_ptr = (int*)bsearch(&irow,&(sparsity->row[sparsity->col[i]]),nnz_col,sizeof(int),row_compare);
        if(irow < i && irow > 0 && row_ptr != NULL) // strictly-lower part: alias onto the first-row block
        {
          int ielem = (int)(row_ptr - sparsity->row);
          mat[ielem] = mat[sparsity->col[sparsity->row[j]]];
        }
      }
    }
  }
}
//===================================//
// 6. PSEUDORANDOM NUMBER GENERATION //
//===================================//
// C rand() & srand() are not guaranteed to be reproducible
// & many of their default implementations are considered bad
// Here we define a simple pseudorandom number generator
// PRNG that passes BigCrush empirical randomness tests, xorshift1024star() from [http://en.wikipedia.org/wiki/Xorshift]
// Keeps 16 words of static state; reseeding resets the state deterministically.
uint64_t random64(const uint32_t seed) // 0 for normal use, nonzero seed value to reseed
{
  static uint64_t state[16];
  static uint8_t index;
  // seed & "warm up" the PRNG: fill the state deterministically, then discard 16384 outputs
  if(seed != 0)
  {
    index = 0;
    for(uint32_t i=0 ; i<16 ; i++) { state[i] = seed + i; }
    for(uint32_t i=0 ; i<16384 ; i++) { random64(0); }
  }
  const uint64_t s0 = state[index];
  index = (uint8_t)((index + 1) & 15);
  uint64_t s1 = state[index];
  s1 ^= s1 << 31; // a
  s1 ^= s1 >> 11; // b
  uint64_t mixed = s0 ^ (s0 >> 30); // c
  state[index] = mixed ^ s1;
  return state[index] * 1181783497276652981ULL;
}
// pseudorandom uniform distribution over (0,1]
double random_uniform()
{
  // keep the top 53 bits (the width of a double mantissa); +1 shifts the range from [0,1) to (0,1]
  uint64_t bits = random64(0) >> 11;
  return ((double)(bits + 1))/9007199254740992.0;
}
// pseudorandom block-sparse complex rotor vector
// Zeros the whole (real + imaginary) vector, then fills the diagonal of each selected
// block with unit-modulus complex numbers e^{i*phase} at uniformly random phases.
void random_vec(int nblock, // size of blocks
                int nvec, // number of blocks
                int nnz, // number of nonzero rotors
                int *index, // block index of rotors [nnz]
                double *vec) // random vector, contiguous real & imaginary parts [2*nblock*nblock*nvec]
{
  // 2*nvec: clears both the real & imaginary halves in one call
  zero_vec(nblock,2*nvec,vec);
  double *vec_real = vec, *vec_imag = &(vec[nblock*nblock*nvec]);
  for(int i=0 ; i<nnz ; i++)
  {
    for(int j=0 ; j<nblock ; j++)
    {
      double phase = 2.0*M_PI*random_uniform();
      // diagonal entry (j,j) of block index[i]: cos + i*sin of the random phase
      vec_real[j+(j+index[i]*nblock)*nblock] = cos(phase);
      vec_imag[j+(j+index[i]*nblock)*nblock] = sin(phase);
    }
  }
}
//======================//
// 7. ITERATIVE SOLVERS //
//======================//
// Operational condition number estimate: ||r_n|| <= 2*sqrt(K)*[(sqrt(K) - 1)/(sqrt(K) + 1)]^n * ||b||
double condition_number(double epsilon, // CG iteration tolerance
int niter) // average number of iterations
{
// simple iteration for x = sqrt(K)
double sqrtK0, sqrtK = 1.0;
do
{
sqrtK0 = sqrtK;
double dtemp = pow(0.5*epsilon/sqrtK0,1.0/(double)niter);
sqrtK = (1.0 + dtemp)/(1.0 - dtemp);
}while(fabs(sqrtK0 - sqrtK) > 1e-8*sqrtK);
// solve x = sqrt(K) for K
return pow(sqrtK,2);
}
// Apply the inverse of a symmetric positive definite matrix to a vector using the standard conjugate gradient algorithm
// NOTE: using Table 2.1 pseudocode from [D. C.-L. Fong and M. Saunders, SQU Journal for Science 17, 44-62 (2012)]
int spd_inv(int nblock, // matrix & vector block size
            struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrix [1]
            double res_tol, // target residual error
            double **mat, // matrix elements of the SPD sparse matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
            double *vec, // vector with overlap matrix inverse applied to it on output [sparsity->nrow*nblock*nblock]
            double *work) // pre-allocated workspace [3*sparsity->nrow*nblock*nblock]
{
  // block CG: nblock right-hand-side columns are solved simultaneously, sharing each mat_vec;
  // alpha/beta/rho are per-column scalars, so columns converge independently
  int num_iter = 0, nrow = sparsity->nrow;
  double res_tol2 = res_tol*res_tol; // squared norms are compared, avoiding any sqrt
  double alpha[NBLOCK_MAX], beta[NBLOCK_MAX], rho0[NBLOCK_MAX], rho_old[NBLOCK_MAX], rho[NBLOCK_MAX];
  // memory allocation (p, q, & r)
  int ndata = sparsity->nrow*nblock*nblock;
  double *p = work;
  double *q = &(work[ndata]);
  double *r = &(work[2*ndata]);
  double *x = vec; // memory reuse: input right-hand side is overwritten with the solution
  // r = b
  copy_vec(nblock,nrow,vec,r);
  // x = 0
  zero_vec(nblock,nrow,x);
  // p = r
  copy_vec(nblock,nrow,r,p);
  // rho = r^T*r
  dot_vec(nblock,nrow,r,r,rho);
  // rho0 stores the initial residual norms for the relative convergence test
  for(int i=0 ; i<nblock ; i++) { rho0[i] = rho[i]; }
  // repeat until convergence
  int not_converged = 1;
  while(not_converged)
  {
    num_iter++;
    // q = A*p
    mat_vec(nblock,sparsity,1.0,0.0,mat,p,q);
    // alpha = rho / p^T*q (guarded against division by zero for fully converged columns)
    dot_vec(nblock,nrow,p,q,alpha);
    for(int i=0 ; i<nblock ; i++)
    { if(alpha[i] != 0.0) { alpha[i] = rho[i]/alpha[i]; } }
    // x <- x + alpha*p
    add_vec(nblock,nrow,alpha,p,x);
    // r <- r - alpha*q (alpha is reused as scratch for the negated step sizes)
    for(int i=0 ; i<nblock ; i++) { alpha[i] = -alpha[i]; }
    add_vec(nblock,nrow,alpha,q,r);
    // rho_old = rho
    for(int i=0 ; i<nblock ; i++) { rho_old[i] = rho[i]; }
    // rho = r^T*r
    dot_vec(nblock,nrow,r,r,rho);
    // beta = rho / rho_old
    for(int i=0 ; i<nblock ; i++)
    { if(rho_old[i] != 0.0) { beta[i] = rho[i]/rho_old[i]; } }
    // p <- r + beta*p (alpha reused once more, as all-ones weights for add_vec)
    for(int i=0 ; i<nblock ; i++) { alpha[i] = 1.0; }
    scale_vec(nblock,nrow,beta,p);
    add_vec(nblock,nrow,alpha,r,p);
    // check for convergence of all nblock CG solves
    not_converged = 0;
    for(int i=0 ; i<nblock ; i++)
    { if(rho[i] > res_tol2*rho0[i]) { not_converged = 1; } }
  }
  return num_iter; // number of CG iterations (= number of mat_vec applications)
}
// Chebyshev approximation of matrix functions applied to an input vector
int chebyshev_mat(int nblock, // matrix block size
                  int ncoeff, // number of Chebyshev polynomials
                  double res_tol, // desired residual tolerance for convergence
                  double hwt, // Hamiltonian coefficient for scaled Hamiltonian matrix
                  double owt, // overlap coefficient for scaled Hamiltonian matrix
                  double *coeff, // density coefficients for Chebyshev polynomials [ncoeff]
                  struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                  double **scale_hamiltonian, // scaled Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double *vec, // input vector [sparsity->nrow*nblock*nblock]
                  double *func_vec, // output vector [sparsity->nrow*nblock*nblock]
                  double *func2_vec, // output vector #2 [sparsity->nrow*nblock*nblock]
                  double *work) // pre-allocated workspace [6*sparsity->nrow*nblock*nblock]
{
  // returns the accumulated mat-vec count (spd_inv iterations plus the explicit mat_vec calls, +1 each)
  int num_matvec = 0;
  // allocate memory for block vectors
  // NOTE: work[0..3*ndata) stays free because spd_inv needs exactly that much scratch
  int ndata = sparsity->nrow*nblock*nblock;
  double *T_old = &(work[3*ndata]);
  double *T = &(work[4*ndata]);
  double *T_new = &(work[5*ndata]);
  // block weights for add_vec
  double minus_one[NBLOCK_MAX], block_wt[NBLOCK_MAX];
  for(int i=0 ; i<nblock ; i++) { minus_one[i] = -1.0; }
  // clear the output vectors
  zero_vec(nblock,sparsity->nrow,func_vec);
  // setup 1st Chebyshev vector (T0 = 1 -> T_old)
  copy_vec(nblock,sparsity->nrow,vec,T_old);
  // setup 2nd Chebyshev vector (T1 = H*S^{-1} -> T); T_new is used as scratch for S^{-1}*vec here
  copy_vec(nblock,sparsity->nrow,vec,T_new);
  num_matvec += spd_inv(nblock,sparsity,res_tol,overlap,T_new,work) + 1;
  mat_vec(nblock,sparsity,1.0,0.0,scale_hamiltonian,T_new,T);
  // retain term for the block-sparse density & response matrices
  for(int i=0 ; i<nblock ; i++) { block_wt[i] = coeff[0]; }
  add_vec(nblock,sparsity->nrow,block_wt,T_old,func_vec);
  if(ncoeff > 1)
  {
    for(int i=0 ; i<nblock ; i++) { block_wt[i] = coeff[1]; }
    add_vec(nblock,sparsity->nrow,block_wt,T,func_vec);
  }
  // loop to build Chebyshev polynomial expansion via the three-term recurrence
  for(int i=2 ; i<ncoeff ; i++)
  {
    // prepare new Chebyshev vector (2*H*S^{-1}*T - T_old -> T_new)
    copy_vec(nblock,sparsity->nrow,T,T_new);
    num_matvec += spd_inv(nblock,sparsity,res_tol,overlap,T_new,work) + 1;
    copy_vec(nblock,sparsity->nrow,T_new,work);
    mat_vec(nblock,sparsity,2.0,0.0,scale_hamiltonian,work,T_new);
    add_vec(nblock,sparsity->nrow,minus_one,T_old,T_new);
    // retain term for the block-sparse density & response matrices
    for(int j=0 ; j<nblock ; j++) { block_wt[j] = coeff[i]; }
    add_vec(nblock,sparsity->nrow,block_wt,T_new,func_vec);
    // pointer swapping to avoid copying vectors
    double* ptr = T_old;
    T_old = T;
    T = T_new;
    T_new = ptr;
  }
  // final application of S^{-1} for both density & response matrices
  num_matvec += spd_inv(nblock,sparsity,res_tol,overlap,func_vec,work) + 1;
  // final additional application of -S^{-1}*H(unscaled) for response matrix
  // -S^{-1}*H(unscaled) = -S^{-1}*H/hwt + (owt/hwt)*I
  // NOTE(review): this copy looks redundant if mat_vec with beta=0.0 fully overwrites
  // its output — confirm mat_vec's beta semantics before removing it
  copy_vec(nblock,sparsity->nrow,func_vec,func2_vec);
  mat_vec(nblock,sparsity,-1.0/hwt,0.0,scale_hamiltonian,func_vec,func2_vec);
  num_matvec += spd_inv(nblock,sparsity,res_tol,overlap,func2_vec,work) + 1;
  for(int i=0 ; i<nblock ; i++) { block_wt[i] = owt/hwt; }
  add_vec(nblock,sparsity->nrow,block_wt,func_vec,func2_vec);
  return num_matvec;
}
// Pre-conditioned complex symmetric linear system solver using CGLS [A*x = b -> P*A*x = P*b]
// NOTE: using CGLS pseudocode from [C. C. Paige and M. A. Saunders, TOMS 8, 43-71 (1982)]
int cgls_inv(int nblock, // matrix & vector block size
             struct pattern *mat_sparsity, // sparsity pattern of the sparse matrix [1]
             struct pattern *pre_sparsity, // sparsity pattern of the preconditioner (NULL if none) [1]
             double complex shift, // complex shift applied to mat_shift: mat_base + shift*mat_shift
             double res_tol, // target residual error
             double **mat_base, // base part of the sparse matrix [mat_sparsity->col[mat_sparsity->ncol]][nblock*nblock]
             double **mat_shift, // shifted part of the sparse matrix [mat_sparsity->col[mat_sparsity->ncol]][nblock*nblock]
             double **pre_real, // real part of the preconditioner [pre_sparsity->col[pre_sparsity->ncol]][nblock*nblock]
             double **pre_imag, // imaginary part of the preconditioner [pre_sparsity->col[pre_sparsity->ncol]][nblock*nblock]
             double *rhs_real, // real part of right-hand-side vector [sparsity->nrow*nblock*nblock]
             double *rhs_imag, // imaginary part of right-hand-side vector (NULL for a purely real rhs) [sparsity->nrow*nblock*nblock]
             double *x, // solution vector (input a guess), contiguous real & imaginary vectors [2*sparsity->nrow*nblock*nblock]
             double *work) // pre-allocated workspace [8*sparsity->nrow*nblock*nblock]
{
  // complex vectors are stored as stacked real & imaginary halves, hence nrow is doubled
  // and each workspace vector occupies 2*ndata doubles
  int num_iter = 0, nrow = 2*mat_sparsity->nrow;
  double res_tol2 = res_tol*res_tol; // squared norms are compared, avoiding any sqrt
  double alpha[NBLOCK_MAX], beta[NBLOCK_MAX], rho_old[NBLOCK_MAX], rho[NBLOCK_MAX], res0[NBLOCK_MAX], res[NBLOCK_MAX];
  // memory allocation (p, q, r, & s)
  int ndata = mat_sparsity->nrow*nblock*nblock;
  double *p = work;
  double *q = &(work[2*ndata]);
  double *r = &(work[4*ndata]);
  double *s = &(work[6*ndata]);
  // compute reference norms for convergence tests (b^H*b)
  dot_vec(nblock,nrow/2,rhs_real,rhs_real,res0);
  if(rhs_imag != NULL)
  {
    dot_vec(nblock,nrow/2,rhs_imag,rhs_imag,res);
    for(int i=0 ; i<nblock ; i++) { res0[i] += res[i]; }
  }
  // r = P*(b - A*x), where x enters as the caller-supplied initial guess
  copy_vec(nblock,nrow/2,rhs_real,r);
  if(rhs_imag != NULL) { copy_vec(nblock,nrow/2,rhs_imag,&(r[ndata])); }
  else { zero_vec(nblock,nrow/2,&(r[ndata])); }
  zmat_zvec(nblock,mat_sparsity,shift,mat_base,mat_shift,x,s);
  for(int i=0 ; i<nblock ; i++) { alpha[i] = -1.0; }
  add_vec(nblock,nrow,alpha,s,r);
  if(pre_sparsity != NULL)
  {
    copy_vec(nblock,nrow,r,s);
    zmat_zvec(nblock,pre_sparsity,I,pre_real,pre_imag,s,r);
  }
  // p = A^H*P^H*r (conjugated shift & -I implement the Hermitian conjugates)
  if(pre_sparsity != NULL)
  {
    zmat_zvec(nblock,pre_sparsity,-I,pre_real,pre_imag,r,s);
    zmat_zvec(nblock,mat_sparsity,conj(shift),mat_base,mat_shift,s,p);
  }
  else
  { zmat_zvec(nblock,mat_sparsity,conj(shift),mat_base,mat_shift,r,p); }
  // rho = p^H*p
  dot_vec(nblock,nrow,p,p,rho);
  // repeat until convergence
  int not_converged = 1;
  while(not_converged)
  {
    num_iter++;
    // q = P*A*p
    if(pre_sparsity != NULL)
    {
      zmat_zvec(nblock,mat_sparsity,shift,mat_base,mat_shift,p,s);
      zmat_zvec(nblock,pre_sparsity,I,pre_real,pre_imag,s,q);
    }
    else
    { zmat_zvec(nblock,mat_sparsity,shift,mat_base,mat_shift,p,q); }
    // alpha = rho / q^H*q (guarded against division by zero for converged columns)
    dot_vec(nblock,nrow,q,q,alpha);
    for(int i=0 ; i<nblock ; i++)
    { if(alpha[i] != 0.0) { alpha[i] = rho[i]/alpha[i]; } }
    // x <- x + alpha*p
    add_vec(nblock,nrow,alpha,p,x);
    // r <- r - alpha*q (alpha reused as scratch for the negated step sizes)
    for(int i=0 ; i<nblock ; i++) { alpha[i] = -alpha[i]; }
    add_vec(nblock,nrow,alpha,q,r);
    // q = A^H*P^H*r (q is reused to hold the CGLS normal-equations residual)
    if(pre_sparsity != NULL)
    {
      zmat_zvec(nblock,pre_sparsity,-I,pre_real,pre_imag,r,s);
      zmat_zvec(nblock,mat_sparsity,conj(shift),mat_base,mat_shift,s,q);
    }
    else
    { zmat_zvec(nblock,mat_sparsity,conj(shift),mat_base,mat_shift,r,q); }
    // rho_old = rho
    for(int i=0 ; i<nblock ; i++) { rho_old[i] = rho[i]; }
    // rho = q^H*q
    dot_vec(nblock,nrow,q,q,rho);
    // beta = rho / rho_old
    for(int i=0 ; i<nblock ; i++)
    { if(rho_old[i] != 0.0) { beta[i] = rho[i]/rho_old[i]; } }
    // p <- q + beta*p (alpha reused once more, as all-ones weights for add_vec)
    for(int i=0 ; i<nblock ; i++) { alpha[i] = 1.0; }
    scale_vec(nblock,nrow,beta,p);
    add_vec(nblock,nrow,alpha,q,p);
    // check for convergence of all nblock CG solves against the initial rhs norms
    dot_vec(nblock,nrow,r,r,res);
    not_converged = 0;
    for(int i=0 ; i<nblock ; i++)
    { if(res[i] > res_tol2*res0[i]) { not_converged = 1; } }
  }
  return num_iter; // number of CGLS iterations (each costs 2 complex mat-vecs, plus preconditioner applications)
}
// rational approximation of matrix functions applied to a real input vector
int rational_mat(int nblock, // matrix block size
                 int npole, // number of pole pairs in the rational approximation
                 double res_tol, // desired residual tolerance for convergence
                 double complex *w, // weights for rational approximation [npole]
                 double complex *z, // poles for rational approximation [npole]
                 struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                 double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                 double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                 double *vec, // input vector [sparsity->nrow*nblock*nblock]
                 double *density_vec, // density vector [sparsity->nrow*nblock*nblock]
                 double *response_vec, // response vector [sparsity->nrow*nblock*nblock]
                 double *work) // pre-allocated workspace [10*sparsity->nrow*nblock*nblock]
{
  // returns the total (real-equivalent) mat-vec count accumulated across all solves
  int num_matvec = 0;
  // clear locations for solution vectors
  zero_vec(nblock,sparsity->nrow,density_vec);
  zero_vec(nblock,sparsity->nrow,response_vec);
  // allocate memory
  // work[0..8*ndata) is scratch for cgls_inv/spd_inv; the last 2*ndata holds the complex solution
  int ndata = sparsity->nrow*nblock*nblock;
  double *inverse_vec = &(work[8*ndata]);
  double *inverse_vec_real = inverse_vec, *inverse_vec_imag = &(inverse_vec[ndata]);
  // accumulate wt0 = -2.0*sum_i w_i
  double wt0 = 0.0;
  for(int i=0 ; i<npole ; i++) { wt0 -= 2.0*creal(w[i]); }
  // response correction with wt0*S^{-1}
  // (this also initializes inverse_vec, which doubles as the initial guess for the first cgls_inv)
  copy_vec(nblock,sparsity->nrow,vec,inverse_vec_real);
  zero_vec(nblock,sparsity->nrow,inverse_vec_imag);
  num_matvec += spd_inv(nblock,sparsity,res_tol,overlap,inverse_vec_real,work);
  double wt[NBLOCK_MAX];
  for(int i=0 ; i<nblock ; i++) { wt[i] = wt0; }
  add_vec(nblock,sparsity->nrow,wt,inverse_vec_real,response_vec);
  // loop over poles & solve for shifted inverses (H - z_i*S)^{-1}*vec
  for(int i=0 ; i<npole ; i++)
  {
    // each complex mat-vec operation is equivalent to 4 real mat-vec operations
    num_matvec += 8 + 8*cgls_inv(nblock,sparsity,NULL,-z[i],res_tol,hamiltonian,overlap,NULL,NULL,vec,NULL,inverse_vec,work);
    // accumulate 2*Re[w_i*(inverse)] into the density & -2*Re[z_i*w_i*(inverse)] into the response
    for(int j=0 ; j<nblock ; j++) { wt[j] = 2.0*creal(w[i]); }
    add_vec(nblock,sparsity->nrow,wt,inverse_vec_real,density_vec);
    for(int j=0 ; j<nblock ; j++) { wt[j] = -2.0*cimag(w[i]); }
    add_vec(nblock,sparsity->nrow,wt,inverse_vec_imag,density_vec);
    for(int j=0 ; j<nblock ; j++) { wt[j] = -2.0*creal(z[i]*w[i]); }
    add_vec(nblock,sparsity->nrow,wt,inverse_vec_real,response_vec);
    for(int j=0 ; j<nblock ; j++) { wt[j] = 2.0*cimag(z[i]*w[i]); }
    add_vec(nblock,sparsity->nrow,wt,inverse_vec_imag,response_vec);
  }
  return num_matvec;
}
//=================//
// 8. SOLVER MAINS //
//=================//
// reference solver: embed the sparse matrix into a dense matrix, diagonalize, & restrict the density matrix to a sparse matrix
// NOTE: some arrays here can be larger than the maximum value of "int" and need "size_t" to function
void dense_solver(int nblock, // matrix block size
                  double potential, // chemical potential of the system
                  double temperature, // temperature of the system
                  double min_energy, // assumed minimum energy (to be checked)
                  double max_energy, // assumed maximum energy (to be checked)
                  struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                  double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // embed sparse matrices into dense matrices for LAPACK call
  size_t n = sparsity->nrow*nblock; // size_t: n*n can exceed INT_MAX for large systems
  double *dense_hamiltonian = (double*)malloc(sizeof(double)*n*n);
  double *dense_overlap = (double*)malloc(sizeof(double)*n*n);
  embed_mat(nblock,sparsity,hamiltonian,dense_hamiltonian);
  embed_mat(nblock,sparsity,overlap,dense_overlap);
  // zero density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // diagonalize the Hamiltonian (LAPACK call)
  // dsygv solves the generalized problem H*v = e*S*v; jobz='V' requests eigenvectors,
  // which overwrite dense_hamiltonian, while dense_overlap is destroyed by the factorization
  char jobz = 'V', uplo = 'U';
  MKL_INT size = n, itype = 1, lwork = -1, info;
  double *eigenvalue = (double*)malloc(sizeof(double)*n);
  double work0;
  // first call with lwork = -1 is a workspace-size query only
  dsygv(&itype,&jobz,&uplo,&size,dense_hamiltonian,&size,dense_overlap,&size,eigenvalue,&work0,&lwork,&info);
  if(info != 0) { printf("ERROR: LAPACK dsygv (memory query) returned an error (%d)\n",info); MPI_Abort(MPI_COMM_WORLD,0); }
  lwork = (int)work0;
  double *work = (double*)malloc(sizeof(double)*lwork);
  dsygv(&itype,&jobz,&uplo,&size,dense_hamiltonian,&size,dense_overlap,&size,eigenvalue,work,&lwork,&info);
  if(info != 0) { printf("ERROR: LAPACK dsygv returned an error (%d)\n",info); MPI_Abort(MPI_COMM_WORLD,0); }
  // energy bounds check (dsygv returns eigenvalues in ascending order, so only the extremes are tested)
  if(eigenvalue[0] < min_energy || eigenvalue[n-1] > max_energy)
  {
    printf("ERROR: energy bounds check failed, [%e,%e] not in [%e,%e]\n",eigenvalue[0],eigenvalue[n-1],min_energy,max_energy);
    MPI_Abort(MPI_COMM_WORLD,0);
  }
  // sparsify the output density matrix (to avoid an unnecessary cubic-scaling step)
  for(size_t i=0 ; i<n ; i++) // fill dense_overlap up with eigenvectors times fermi_dirac(eigenvalues)
  {
    // factor of 2 accounts for spin degeneracy
    double func = 2.0*fermi((eigenvalue[i] - potential)/temperature);
    copy_vec(1,n,&(dense_hamiltonian[i*n]),&(dense_overlap[i*n]));
    scale_vec(1,n,&func,&(dense_overlap[i*n]));
  }
  restrict_outmat(nblock,n,sparsity,dense_hamiltonian,dense_overlap,density);
  // sparsify the output overlap-response matrix (to avoid an unnecessary cubic-scaling step)
  for(size_t i=0 ; i<n ; i++) // fill dense_overlap up with eigenvectors times response(eigenvalues)
  {
    double func = -2.0*eigenvalue[i]*fermi((eigenvalue[i] - potential)/temperature);
    copy_vec(1,n,&(dense_hamiltonian[i*n]),&(dense_overlap[i*n]));
    scale_vec(1,n,&func,&(dense_overlap[i*n]));
  }
  restrict_outmat(nblock,n,sparsity,dense_hamiltonian,dense_overlap,response);
  // deallocate memory
  free(work);
  free(eigenvalue);
  free(dense_overlap);
  free(dense_hamiltonian);
}
// PEXSI solver: convert input to PEXSI's native sparse format & convert output back to a block-sparse format
void PEXSI_solver(int mpirank, // rank of this MPI process
                  int mpisize, // total number of MPI processes
                  int nblock, // matrix block size
                  int npole, // number of pole pairs in the rational approximation
                  double complex *w, // rational approximation residues [npole]
                  double complex *z, // rational approximation poles [npole]
                  struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                  double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                  double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // NOTE: the matrix arguments are only meaningful on mpirank == 0; other ranks receive
  // everything they need through the broadcasts & point-to-point transfers below
  // send non-MPI input parameters to mpirank != 0
  MPI_Bcast(&nblock,1,MPI_INT,0,MPI_COMM_WORLD);
  MPI_Bcast(&npole,1,MPI_INT,0,MPI_COMM_WORLD);
  if(mpirank != 0)
  {
    // non-root ranks allocate their own copies of the pole data (freed at the end)
    w = (double complex*)malloc(sizeof(double complex)*npole);
    z = (double complex*)malloc(sizeof(double complex)*npole);
  }
  MPI_Bcast(w,npole,MPI_C_DOUBLE_COMPLEX,0,MPI_COMM_WORLD);
  MPI_Bcast(z,npole,MPI_C_DOUBLE_COMPLEX,0,MPI_COMM_WORLD);
  // convert block-sparse hamiltonian & overlap matrices to one sparse matrix with ordered-pair elements
  // (hamiltonian_overlap is only allocated & used on mpirank == 0)
  double *hamiltonian_overlap;
  struct pattern sparsity2;
  if(mpirank == 0)
  {
    hamiltonian_overlap = (double*)malloc(sizeof(double)*2*nblock*nblock*sparsity->col[sparsity->ncol]);
    block2sparse(nblock,sparsity,&sparsity2,hamiltonian,overlap,hamiltonian_overlap);
  }
  // distribute the sparsity pattern over MPI processes from mpirank == 0
  struct pattern local_sparsity;
  int nnz = split_pattern(mpirank,mpisize,&sparsity2,&local_sparsity);
  int nnz_local = local_sparsity.col[local_sparsity.ncol];
  // distribute the local sparse matrices: rank 0 keeps its leading chunk & ships
  // each remaining contiguous chunk to the rank that owns it
  double *local_hamiltonian_overlap = (double*)malloc(sizeof(double)*2*nnz_local);
  if(mpirank == 0)
  {
    copy_vec(1,2*nnz_local,hamiltonian_overlap,local_hamiltonian_overlap);
    int inz = nnz_local;
    for(int i=1 ; i<mpisize ; i++)
    {
      int nnz_local2;
      MPI_Recv(&nnz_local2,1,MPI_INT,i,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      MPI_Send(&(hamiltonian_overlap[2*inz]),2*nnz_local2,MPI_DOUBLE,i,0,MPI_COMM_WORLD);
      inz += nnz_local2;
    }
    free(hamiltonian_overlap);
  }
  else
  {
    MPI_Send(&nnz_local,1,MPI_INT,0,0,MPI_COMM_WORLD);
    MPI_Recv(local_hamiltonian_overlap,2*nnz_local,MPI_DOUBLE,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
  }
  // setup the matrix storing outputs (ordered pairs: density in [2*j], response in [2*j+1])
  double *local_density_response = (double*)malloc(sizeof(double)*2*nnz_local);
  zero_vec(1,2*nnz_local,local_density_response);
  // switch to fortran-style indexing for PEXSI compatibility (1-based instead of 0-based)
  for(int i=0 ; i<=local_sparsity.ncol ; i++)
  { local_sparsity.col[i]++; }
  for(int i=0 ; i<nnz_local ; i++)
  { local_sparsity.row[i]++; }
  // set nprow to largest factor of mpisize that is less than sqrt(mpisize) for naive best load balancing
  int nprow, nprow_max = (int)sqrt((double)mpisize) + 1;
  for(int i=1 ; i<=nprow_max ; i++)
  { if(mpisize%i == 0) { nprow = i; } } // i=1 always divides, so nprow is always assigned
  // setup PEXSI plan & options
  int info;
  PPEXSIPlan plan = PPEXSIPlanInitialize(MPI_COMM_WORLD,nprow,mpisize/nprow,-1,&info);
  PPEXSIOptions options;
  PPEXSISetDefaultOptions(&options);
  // change comments to switch between SuperLU & symPACK solvers:
  options.solver = 0; options.ordering = 0; options.npSymbFact = 1; // SuperLU & ParMETIS
  // options.solver = 1; options.ordering = 0; options.npSymbFact = mpisize; // symPACK & PT-Scotch
  // NOTE: symPACK should be better because it is specific to symmetric matrices, but it is still early in development
  // NOTE: npSymbFact > 1 is not stable for SuperLU
  options.verbosity = 0; // 0 disables PEXSI outputs (1 or 2 for debug info)
  // load the sparsity pattern
  PPEXSILoadRealHSMatrix(plan,options,local_sparsity.nrow,nnz,nnz_local,local_sparsity.ncol,local_sparsity.col,
                         local_sparsity.row,local_hamiltonian_overlap,1,NULL,&info);
  // perform a 1-time symbolic matrix factorization
  PPEXSISymbolicFactorizeComplexSymmetricMatrix(plan,options,&info);
  // loop over poles
  double *local_inverse = (double*)malloc(sizeof(double)*2*nnz_local);
  for(int i=0 ; i<npole ; i++)
  {
    // form the complex-shifted matrix to be inverted
    for(int j=0 ; j<nnz_local ; j++)
    {
      // introduce new scaling & shifting (PEXSI sees the ordered pairs as the real/imaginary parts of the shifted Hamiltonian)
      local_hamiltonian_overlap[2*j] -= creal(z[i])*local_hamiltonian_overlap[2*j+1];
      local_hamiltonian_overlap[2*j+1] *= -cimag(z[i]);
    }
    // selected inversion
    PPEXSISelInvComplexSymmetricMatrix(plan,options,local_hamiltonian_overlap,local_inverse,&info);
    // accumulate density & response matrices
    for(int j=0 ; j<nnz_local ; j++)
    {
      local_density_response[2*j] += 2.0*creal(w[i]*(local_inverse[2*j] + I*local_inverse[2*j+1]));
      local_density_response[2*j+1] -= 2.0*creal(z[i]*w[i]*(local_inverse[2*j] + I*local_inverse[2*j+1]));
    }
    // undo the shifting of the matrix
    // NOTE(review): this in-place undo is exact only up to floating-point roundoff,
    // so tiny perturbations can accumulate in H & S across poles — confirm tolerance is acceptable
    for(int j=0 ; j<nnz_local ; j++)
    {
      local_hamiltonian_overlap[2*j+1] /= -cimag(z[i]);
      local_hamiltonian_overlap[2*j] += creal(z[i])*local_hamiltonian_overlap[2*j+1];
    }
  }
  // response correction with -wt0*S^{-1}
  double wt0 = 0.0;
  for(int i=0 ; i<npole ; i++) { wt0 += 2.0*creal(w[i]); }
  // replace the shifted Hamiltonian with the overlap matrix (real part = S, imaginary part = 0)
  for(int i=0 ; i<nnz_local ; i++)
  {
    local_hamiltonian_overlap[2*i] = local_hamiltonian_overlap[2*i+1];
    local_hamiltonian_overlap[2*i+1] = 0.0;
  }
  PPEXSISelInvComplexSymmetricMatrix(plan,options,local_hamiltonian_overlap,local_inverse,&info);
  for(int i=0 ; i<nnz_local ; i++)
  { local_density_response[2*i+1] -= wt0*local_inverse[2*i]; }
  PPEXSIPlanFinalize(plan,&info);
  // switch back to C-style indexing (0-based instead of 1-based)
  for(int i=0 ; i<=local_sparsity.ncol ; i++)
  { local_sparsity.col[i]--; }
  for(int i=0 ; i<nnz_local ; i++)
  { local_sparsity.row[i]--; }
  // move the full output matrix back to mpirank == 0 (mirrors the earlier scatter, in reverse)
  if(mpirank == 0)
  {
    double *density_response = (double*)malloc(sizeof(double)*2*nblock*nblock*sparsity->col[sparsity->ncol]);
    copy_vec(1,2*nnz_local,local_density_response,density_response);
    int inz = nnz_local;
    for(int i=1 ; i<mpisize ; i++)
    {
      int nnz_local2;
      MPI_Recv(&nnz_local2,1,MPI_INT,i,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      MPI_Recv(&(density_response[2*inz]),2*nnz_local2,MPI_DOUBLE,i,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE);
      inz += nnz_local2;
    }
    sparse2block(nblock,&sparsity2,sparsity,density_response,density,response);
    free(density_response);
  }
  else
  {
    MPI_Send(&nnz_local,1,MPI_INT,0,0,MPI_COMM_WORLD);
    MPI_Send(local_density_response,2*nnz_local,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
  }
  // deallocate local sparse matrices
  free(local_inverse);
  free(local_density_response);
  free(local_hamiltonian_overlap);
  free_pattern(&local_sparsity);
  if(mpirank != 0)
  {
    free(z);
    free(w);
  }
}
// Quadratic-scaling solver based on polynomial approximation of the Fermi-Dirac distribution
void quad_poly_solver(int nblock, // matrix block size
                      int ncoeff, // number of Chebyshev polynomials
                      double res_tol, // desired residual tolerance for convergence
                      double hwt, // Hamiltonian coefficient for scaled Hamiltonian matrix
                      double owt, // overlap coefficient for scaled Hamiltonian matrix
                      double *coeff, // density coefficients for Chebyshev polynomials [ncoeff]
                      struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                      double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                      double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                      double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                      double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  int num_matvec = 0;
  // zero density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // allocate memory for block vectors (6*ndata of workspace is what chebyshev_mat requires)
  int ndata = sparsity->nrow*nblock*nblock;
  double *src_vec = (double*)malloc(sizeof(double)*ndata);
  double *density_vec = (double*)malloc(sizeof(double)*ndata);
  double *response_vec = (double*)malloc(sizeof(double)*ndata);
  double *work = (double*)malloc(sizeof(double)*6*ndata);
  // shift & scale the Hamiltonian to bound its spectrum within [-1,1]
  // NOTE: the Hamiltonian is modified IN PLACE here & restored before return
  scale_mat(nblock,sparsity,hwt,hamiltonian);
  add_mat(nblock,sparsity,owt,overlap,hamiltonian);
  // loop over block columns of the density & response matrices being constructed
  for(int i=0 ; i<sparsity->ncol ; i++)
  {
    // setup a block column of basis vectors (identity block in block-row i)
    zero_vec(nblock,sparsity->nrow,src_vec);
    for(int j=0 ; j<nblock ; j++) { src_vec[j+(j+i*nblock)*nblock] = 1.0; }
    // construct a block column of the density & response matrices
    num_matvec += chebyshev_mat(nblock,ncoeff,res_tol,hwt,owt,coeff,sparsity,hamiltonian,overlap,src_vec,density_vec,
                                response_vec,work);
    // retain terms for the block-sparse density & response matrices
    // (half from the column, half from the row, symmetrizing the output)
    add_col(nblock,sparsity,i,0.5,density_vec,density);
    add_row(nblock,sparsity,i,0.5,density_vec,density);
    add_col(nblock,sparsity,i,0.5,response_vec,response);
    add_row(nblock,sparsity,i,0.5,response_vec,response);
  }
  // unshift & unscale the Hamiltonian (restores the caller's matrix, up to roundoff)
  add_mat(nblock,sparsity,-owt,overlap,hamiltonian);
  scale_mat(nblock,sparsity,1.0/hwt,hamiltonian);
  printf("> # of mat-vecs = %d\n",num_matvec);
  // deallocate memory
  free(work);
  free(response_vec);
  free(density_vec);
  free(src_vec);
}
// Quadratic-scaling solver based on rational approximation of the Fermi-Dirac distribution
// Quadratic-scaling solver based on rational approximation of the Fermi-Dirac distribution
void quad_rational_solver(int nblock, // matrix block size
                          int npole, // number of pole pairs in the rational approximation
                          double res_tol, // desired residual tolerance for convergence
                          double complex *w, // rational approximation residues [npole]
                          double complex *z, // rational approximation poles [npole]
                          struct pattern *sparsity, // contains the sparsity pattern of the matrices [1]
                          double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                          double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                          double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                          double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  int total_matvec = 0;
  // start from empty density & response matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // workspace: one basis column, two solution columns, & the scratch rational_mat needs
  int col_size = sparsity->nrow*nblock*nblock;
  double *basis_col = (double*)malloc(sizeof(double)*col_size);
  double *density_col = (double*)malloc(sizeof(double)*col_size);
  double *response_col = (double*)malloc(sizeof(double)*col_size);
  double *scratch = (double*)malloc(sizeof(double)*10*col_size);
  // build the outputs one block column at a time
  for(int icol=0 ; icol<sparsity->ncol ; icol++)
  {
    // basis_col = identity block placed in block-row icol
    zero_vec(nblock,sparsity->nrow,basis_col);
    for(int jd=0 ; jd<nblock ; jd++) { basis_col[jd+(jd+icol*nblock)*nblock] = 1.0; }
    // apply the rational approximation of the Fermi-Dirac function to the basis column
    total_matvec += rational_mat(nblock,npole,res_tol,w,z,sparsity,hamiltonian,overlap,basis_col,density_col,response_col,scratch);
    // symmetrized accumulation (half column, half row) into the block-sparse outputs
    add_col(nblock,sparsity,icol,0.5,density_col,density);
    add_row(nblock,sparsity,icol,0.5,density_col,density);
    add_col(nblock,sparsity,icol,0.5,response_col,response);
    add_row(nblock,sparsity,icol,0.5,response_col,response);
  }
  printf("> # of mat-vecs = %d\n",total_matvec);
  // release workspace
  free(scratch);
  free(response_col);
  free(density_col);
  free(basis_col);
}
// Localized solver based on polynomial approximation of the Fermi-Dirac distribution
void local_poly_solver(int nblock, // matrix block size
                       int ncoeff, // number of Chebyshev polynomials
                       double res_tol, // desired residual tolerance for convergence
                       double hwt, // Hamiltonian coefficient for scaled Hamiltonian matrix
                       double owt, // overlap coefficient for scaled Hamiltonian matrix
                       double *coeff, // density coefficients for Chebyshev polynomials [ncoeff]
                       struct pattern *locality, // contains the localization pattern [1]
                       struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                       double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                       double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                       double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                       double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  int num_matvec = 0;
  // zero density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // calculate largest local problem dimension (so workspace can be sized once, for the worst case)
  int nlocal_max = 0;
  for(int i=0 ; i<locality->ncol ; i++)
  { nlocal_max = MAX(nlocal_max,(locality->col[i+1]-locality->col[i])); }
  // allocate memory for block vectors
  int ndata = nlocal_max*nblock*nblock;
  double *src_vec = (double*)malloc(sizeof(double)*ndata);
  double *density_vec = (double*)malloc(sizeof(double)*ndata);
  double *response_vec = (double*)malloc(sizeof(double)*ndata);
  double *work = (double*)malloc(sizeof(double)*6*ndata);
  // allocate memory for localized matrices (pointer arrays; localize_mat fills them in)
  double **local_hamiltonian = (double**)malloc(sizeof(double*)*sparsity->col[sparsity->ncol]);
  double **local_overlap = (double**)malloc(sizeof(double*)*sparsity->col[sparsity->ncol]);
  double **local_density = (double**)malloc(sizeof(double*)*sparsity->col[sparsity->ncol]);
  double **local_response = (double**)malloc(sizeof(double*)*sparsity->col[sparsity->ncol]);
  // shift & scale the Hamiltonian to bound its spectrum within [-1,1]
  // NOTE: the Hamiltonian is modified IN PLACE here & restored before return
  scale_mat(nblock,sparsity,hwt,hamiltonian);
  add_mat(nblock,sparsity,owt,overlap,hamiltonian);
  // loop over block columns of the density & response matrices being constructed
  double ave_sparsity = 0.0;
  for(int i=0 ; i<sparsity->ncol ; i++)
  {
    // create local versions of all relevant matrices, restricted to column i's locality set
    struct pattern local_sparsity;
    int nlocal = locality->col[i+1]-locality->col[i];
    int *local = &(locality->row[locality->col[i]]);
    localize_pattern(nlocal,local,sparsity,&local_sparsity);
    localize_mat(nlocal,local,sparsity,hamiltonian,local_hamiltonian);
    localize_mat(nlocal,local,sparsity,overlap,local_overlap);
    localize_mat(nlocal,local,sparsity,density,local_density);
    localize_mat(nlocal,local,sparsity,response,local_response);
    // locate column i inside its own locality set
    // NOTE(review): assumes the locality pattern of column i always contains i itself;
    // otherwise local_i stays -1 and the src_vec index below goes negative — confirm upstream invariant
    int local_i = -1;
    for(int j=0 ; j<nlocal ; j++)
    { if(locality->row[j+locality->col[i]] == i) { local_i = j; } }
    ave_sparsity += (double)local_sparsity.col[local_sparsity.ncol]/(double)local_sparsity.ncol;
    // setup source vector (identity block in local block-row local_i)
    zero_vec(nblock,local_sparsity.nrow,src_vec);
    for(int j=0 ; j<nblock ; j++) { src_vec[j+(j+local_i*nblock)*nblock] = 1.0; }
    // construct a block column of the density & response matrices
    num_matvec += chebyshev_mat(nblock,ncoeff,res_tol,hwt,owt,coeff,&local_sparsity,local_hamiltonian,local_overlap,src_vec,
                                density_vec,response_vec,work);
    // retain terms for the block-sparse density & response matrices
    // (half from the column, half from the row, symmetrizing the output)
    add_col(nblock,&local_sparsity,local_i,0.5,density_vec,local_density);
    add_row(nblock,&local_sparsity,local_i,0.5,density_vec,local_density);
    add_col(nblock,&local_sparsity,local_i,0.5,response_vec,local_response);
    add_row(nblock,&local_sparsity,local_i,0.5,response_vec,local_response);
    // deallocate the temporary local sparsity pattern
    free_pattern(&local_sparsity);
  }
  // unshift & unscale the Hamiltonian (restores the caller's matrix, up to roundoff)
  add_mat(nblock,sparsity,-owt,overlap,hamiltonian);
  scale_mat(nblock,sparsity,1.0/hwt,hamiltonian);
  printf("average local H/S sparsity = %lf\n",(double)ave_sparsity/(double)sparsity->ncol);
  printf("# of mat-vecs = %d\n",num_matvec);
  // deallocate memory
  free(local_response);
  free(local_density);
  free(local_overlap);
  free(local_hamiltonian);
  free(work);
  free(response_vec);
  free(density_vec);
  free(src_vec);
}
// Localized solver based on rational approximation of the Fermi-Dirac distribution
void local_rational_solver(int nblock, // matrix block size
                           int npole, // number of pole pairs in the rational approximation
                           double res_tol, // desired residual tolerance for convergence
                           double complex *w, // rational approximation residues [npole]
                           double complex *z, // rational approximation poles [npole]
                           struct pattern *locality, // contains the localization pattern [1]
                           struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                           double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                           double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                           double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                           double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  int total_matvec = 0;
  // start from zeroed density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // find the widest localization region to size the work buffers
  int max_width = 0;
  for(int col=0 ; col<locality->ncol ; col++)
  {
    int width = locality->col[col+1] - locality->col[col];
    max_width = MAX(max_width,width);
  }
  // allocate block-vector storage sized for the widest local problem
  int vec_size = max_width*nblock*nblock;
  double *src_vec = (double*)malloc(sizeof(double)*vec_size);
  double *density_vec = (double*)malloc(sizeof(double)*vec_size);
  double *response_vec = (double*)malloc(sizeof(double)*vec_size);
  double *work = (double*)malloc(sizeof(double)*10*vec_size);
  // pointer tables for the localized views of the block-sparse matrices
  int nnz = sparsity->col[sparsity->ncol];
  double **local_hamiltonian = (double**)malloc(sizeof(double*)*nnz);
  double **local_overlap = (double**)malloc(sizeof(double*)*nnz);
  double **local_density = (double**)malloc(sizeof(double*)*nnz);
  double **local_response = (double**)malloc(sizeof(double*)*nnz);
  // build each block column of the density & response matrices from its own local problem
  double sparsity_sum = 0.0;
  for(int col=0 ; col<sparsity->ncol ; col++)
  {
    // restrict the sparsity pattern & all matrices to the localization region of this column
    struct pattern local_sparsity;
    int width = locality->col[col+1] - locality->col[col];
    int *region = &(locality->row[locality->col[col]]);
    localize_pattern(width,region,sparsity,&local_sparsity);
    localize_mat(width,region,sparsity,hamiltonian,local_hamiltonian);
    localize_mat(width,region,sparsity,overlap,local_overlap);
    localize_mat(width,region,sparsity,density,local_density);
    localize_mat(width,region,sparsity,response,local_response);
    // locate this column's own index inside the localization region
    int diag_idx = -1;
    for(int l=0 ; l<width ; l++)
    { if(locality->row[l+locality->col[col]] == col) { diag_idx = l; } }
    sparsity_sum += (double)local_sparsity.col[local_sparsity.ncol]/(double)local_sparsity.ncol;
    // source vector: identity block at the diagonal position of this column
    zero_vec(nblock,local_sparsity.nrow,src_vec);
    for(int l=0 ; l<nblock ; l++) { src_vec[l+(l+diag_idx*nblock)*nblock] = 1.0; }
    // solve for this block column of the density & response matrices
    total_matvec += rational_mat(nblock,npole,res_tol,w,z,&local_sparsity,local_hamiltonian,local_overlap,src_vec,density_vec,
                                 response_vec,work);
    // symmetrically scatter the column into the block-sparse density & response matrices
    add_col(nblock,&local_sparsity,diag_idx,0.5,density_vec,local_density);
    add_row(nblock,&local_sparsity,diag_idx,0.5,density_vec,local_density);
    add_col(nblock,&local_sparsity,diag_idx,0.5,response_vec,local_response);
    add_row(nblock,&local_sparsity,diag_idx,0.5,response_vec,local_response);
    // release the per-column sparsity pattern
    free_pattern(&local_sparsity);
  }
  printf("average local H/S sparsity = %lf\n",(double)sparsity_sum/(double)sparsity->ncol);
  printf("# of mat-vecs = %d\n",total_matvec);
  // deallocate memory
  free(local_response);
  free(local_density);
  free(local_overlap);
  free(local_hamiltonian);
  free(work);
  free(response_vec);
  free(density_vec);
  free(src_vec);
}
// Random (stochastic) solver based on polynomial approximation of the Fermi-Dirac distribution
void random_poly_solver(int nblock, // matrix block size
                        int ncoeff, // number of Chebyshev polynomials
                        int ncolor, // number of atom colors
                        int nsample, // number of random samples
                        int seed, // PRNG seed
                        double res_tol, // desired residual tolerance for convergence
                        double hwt, // Hamiltonian coefficient for scaled Hamiltonian matrix
                        double owt, // overlap coefficient for scaled Hamiltonian matrix
                        double *coeff, // density coefficients for Chebyshev polynomials [ncoeff]
                        int *color, // list of color offsets for the atom_ptr list [ncolor+1]
                        int *atom_ptr, // list of atoms of each color [color[ncolor]]
                        struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                        double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                        double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                        double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                        double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  int total_matvec = 0;
  // reseed the PRNG on entry so results are deterministic across runs
  random64(seed);
  // start from zeroed density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // block-vector storage: rng_vec packs two random vectors back-to-back
  int vec_size = sparsity->nrow*nblock*nblock;
  double *rng_vec = (double*)malloc(sizeof(double)*2*vec_size);
  double *rng_real = rng_vec;
  double *rng_imag = &(rng_vec[vec_size]);
  double *density_vec = (double*)malloc(sizeof(double)*vec_size);
  double *response_vec = (double*)malloc(sizeof(double)*vec_size);
  double *work = (double*)malloc(sizeof(double)*6*vec_size);
  // shift & scale the Hamiltonian to bound its spectrum within [-1,1]
  scale_mat(nblock,sparsity,hwt,hamiltonian);
  add_mat(nblock,sparsity,owt,overlap,hamiltonian);
  // accumulate stochastic contributions, one colored random vector at a time
  for(int sample=0 ; sample<nsample ; sample++)
  for(int c=0 ; c<ncolor ; c++)
  {
    // draw a random block source vector over the atoms of this color
    random_vec(nblock,sparsity->nrow,color[c+1]-color[c],&(atom_ptr[color[c]]),rng_vec);
    // process the two packed random vectors in turn (same order as separate real/imag passes)
    for(int part=0 ; part<2 ; part++)
    {
      double *probe = (part == 0) ? rng_real : rng_imag;
      // apply the Chebyshev expansion to this probe vector
      total_matvec += chebyshev_mat(nblock,ncoeff,res_tol,hwt,owt,coeff,sparsity,hamiltonian,overlap,probe,density_vec,
                                    response_vec,work);
      // add contributions to the (symmetric) density and response matrices
      restrict_outvec(nblock,sparsity,density_vec,probe,density);
      restrict_outvec(nblock,sparsity,probe,density_vec,density);
      restrict_outvec(nblock,sparsity,response_vec,probe,response);
      restrict_outvec(nblock,sparsity,probe,response_vec,response);
    }
  }
  // undo the shift & scale of the Hamiltonian
  add_mat(nblock,sparsity,-owt,overlap,hamiltonian);
  scale_mat(nblock,sparsity,1.0/hwt,hamiltonian);
  // average the density and response matrices (0.5 factor averages the symmetrization)
  scale_mat(nblock,sparsity,0.5/(double)nsample,density);
  scale_mat(nblock,sparsity,0.5/(double)nsample,response);
  printf("# of mat-vecs = %d\n",total_matvec);
  // deallocate memory
  free(work);
  free(response_vec);
  free(density_vec);
  free(rng_vec);
}
// Random (stochastic) solver based on rational approximation of the Fermi-Dirac distribution
void random_rational_solver(int nblock, // matrix block size
                            int npole, // number of pole pairs in the rational approximation
                            int ncolor, // number of atom colors
                            int nsample, // number of random samples
                            int seed, // PRNG seed
                            double res_tol, // desired residual tolerance for convergence
                            double complex *w, // rational approximation residues [npole]
                            double complex *z, // rational approximation poles [npole]
                            int *color, // list of color offsets for the atom_ptr list [ncolor+1]
                            int *atom_ptr, // list of atoms of each color [color[ncolor]]
                            struct pattern *sparsity, // contains the sparsity pattern & dimensions of the matrices [1]
                            double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                            double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                            double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                            double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  int total_matvec = 0;
  // reseed the PRNG on entry so results are deterministic across runs
  random64(seed);
  // start from zeroed density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // block-vector storage: rng_vec packs two random vectors back-to-back
  int vec_size = sparsity->nrow*nblock*nblock;
  double *rng_vec = (double*)malloc(sizeof(double)*2*vec_size);
  double *rng_real = rng_vec;
  double *rng_imag = &(rng_vec[vec_size]);
  double *density_vec = (double*)malloc(sizeof(double)*vec_size);
  double *response_vec = (double*)malloc(sizeof(double)*vec_size);
  double *work = (double*)malloc(sizeof(double)*10*vec_size);
  // accumulate stochastic contributions, one colored random vector at a time
  for(int sample=0 ; sample<nsample ; sample++)
  for(int c=0 ; c<ncolor ; c++)
  {
    // draw a random block source vector over the atoms of this color
    random_vec(nblock,sparsity->nrow,color[c+1]-color[c],&(atom_ptr[color[c]]),rng_vec);
    // process the two packed random vectors in turn (same order as separate real/imag passes)
    for(int part=0 ; part<2 ; part++)
    {
      double *probe = (part == 0) ? rng_real : rng_imag;
      // apply the rational approximation to this probe vector
      total_matvec += rational_mat(nblock,npole,res_tol,w,z,sparsity,hamiltonian,overlap,probe,density_vec,response_vec,
                                   work);
      // add contributions to the (symmetric) density and response matrices
      restrict_outvec(nblock,sparsity,density_vec,probe,density);
      restrict_outvec(nblock,sparsity,probe,density_vec,density);
      restrict_outvec(nblock,sparsity,response_vec,probe,response);
      restrict_outvec(nblock,sparsity,probe,response_vec,response);
    }
  }
  // average the density and response matrices (0.5 factor averages the symmetrization)
  scale_mat(nblock,sparsity,0.5/(double)nsample,density);
  scale_mat(nblock,sparsity,0.5/(double)nsample,response);
  printf("# of mat-vecs = %d\n",total_matvec);
  // deallocate memory
  free(work);
  free(response_vec);
  free(density_vec);
  free(rng_vec);
}
// Infinite-crystal solver based on rational approximation of the Fermi-Dirac distribution
void infinite_rational_solver(int nblock, // matrix block size
int npole, // number of pole pairs in the rational approximation
double res_tol, // desired residual tolerance for convergence
double *atom, // atomic coordinates [3*sparsity->nrow]
double complex *w, // rational approximation residues [npole]
double complex *z, // rational approximation poles [npole]
struct pattern *sparsity, // contains the sparsity pattern of the matrices [1]
double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
int num_matvec = 0;
// zero density & response block-sparse matrices
zero_mat(nblock,sparsity,density);
zero_mat(nblock,sparsity,response);
// allocate memory for block vectors
int ndata = sparsity->nrow*nblock*nblock;
double *src_vec = (double*)malloc(sizeof(double)*2*ndata);
double *src_vec_real = src_vec, *src_vec_imag = &(src_vec[ndata]);
double *density_vec = (double*)malloc(sizeof(double)*2*ndata);
double *response_vec = (double*)malloc(sizeof(double)*2*ndata);
double *work = (double*)malloc(sizeof(double)*10*ndata);
// setup the block column of basis vectors
zero_vec(nblock,2*sparsity->nrow,src_vec);
for(int i=0 ; i<nblock ; i++) { src_vec[i+i*nblock] = 1.0; }
// construct the single independent block column of the density & response matrices
num_matvec += rational_mat(nblock,npole,res_tol,w,z,sparsity,hamiltonian,overlap,src_vec,density_vec,response_vec,work);
// multiply the (0,0) block by 0.5 to avoid double-counting
for(int i=0 ; i<nblock*nblock ; i++) { density_vec[i] *= 0.5; response_vec[i] *= 0.5; }
// retain terms for the block-sparse density & response matrices
add_col(nblock,sparsity,0,1.0,density_vec,density);
add_row(nblock,sparsity,0,1.0,density_vec,density);
add_col(nblock,sparsity,0,1.0,response_vec,response);
add_row(nblock,sparsity,0,1.0,response_vec,response);
printf("> # of mat-vecs = %d\n",num_matvec);
// calculate the Frobenius-norm of matrix blocks for density & response matrices
FILE *decay_file = fopen("decay.out","w");
fprintf(decay_file,"%d\n",sparsity->nrow);
for(int i=0 ; i<sparsity->nrow ; i++)
{
double dist = A0*distance(&(atom[0]),&(atom[3*i])), density_norm, response_norm;
dot_vec(1,nblock*nblock,&(density_vec[i*nblock*nblock]),&(density_vec[i*nblock*nblock]),&density_norm);
dot_vec(1,nblock*nblock,&(response_vec[i*nblock*nblock]),&(response_vec[i*nblock*nblock]),&response_norm);
fprintf(decay_file,"%e %e %e\n",dist,sqrt(density_norm),sqrt(response_norm));
}
fclose(decay_file);
// deallocate memory
free(work);
free(response_vec);
free(density_vec);
free(src_vec);
}
// Infinite-crystal solver based on reciprocal-space decomposition of the eigenvalue problem (band structure)
// NOTE: this function must be modified if MKL_Complex16 differs from its MKL specification
// NOTE: this function does not have any threading & it is not meant to have high performance
// Side effects: writes "dos.out" (density of states) & "decay.out" (block-norm decay with distance).
void infinite_reciprocal_solver(int nblock, // matrix block size
                                double potential, // chemical potential of the system
                                double temperature, // temperature of the system
                                double min_energy, // minimum energy for density-of-states plot
                                double max_energy, // maximum energy for density-of-states plot
                                int ngrid, // number of k-space grid points per dimension
                                int *latvec, // list of lattice vectors [3*sparsity->nrow]
                                double *atom, // atomic coordinates [3*sparsity->nrow]
                                struct pattern *sparsity, // contains the sparsity pattern of the matrices [1]
                                double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                                double **overlap, // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                                double **density, // restricted density matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                                double **response) // restricted response matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // zero density & response block-sparse matrices
  zero_mat(nblock,sparsity,density);
  zero_mat(nblock,sparsity,response);
  // allocate memory for LAPACK solver
  MKL_INT itype = 1, size = nblock, lwork = -1, info;
  char jobz = 'V', uplo = 'U', transa = 'N', transb = 'C';
  double *eigenvalue = (double*)malloc(sizeof(double)*size);
  double *rwork = (double*)malloc(sizeof(double)*(3*size-2));
  MKL_Complex16 *hamiltonian_k = (MKL_Complex16*)malloc(sizeof(MKL_Complex16)*size*size);
  MKL_Complex16 *overlap_k = (MKL_Complex16*)malloc(sizeof(MKL_Complex16)*size*size);
  MKL_Complex16 *density0 = (MKL_Complex16*)malloc(sizeof(MKL_Complex16)*size*size);
  MKL_Complex16 *response0 = (MKL_Complex16*)malloc(sizeof(MKL_Complex16)*size*size);
  MKL_Complex16 work0, one, zero;
  // workspace-size query for zhegv (lwork = -1)
  zhegv(&itype,&jobz,&uplo,&size,hamiltonian_k,&size,overlap_k,&size,eigenvalue,&work0,&lwork,rwork,&info);
  if(info != 0) { printf("ERROR: LAPACK zhegv (memory query) returned an error (%d)\n",info); MPI_Abort(MPI_COMM_WORLD,0); }
  lwork = (int)(work0.real); // depends on details of MKL_Complex16
  one.real = 1.0; one.imag = 0.0; // depends on details of MKL_Complex16
  zero.real = 0.0; zero.imag = 0.0; // depends on details of MKL_Complex16
  MKL_Complex16 *work = (MKL_Complex16*)malloc(sizeof(MKL_Complex16)*lwork);
  // setup density-of-states information (bin width ~ temperature/4)
  int ndos = (int)(4.0*(max_energy-min_energy)/temperature);
  double denergy = (max_energy-min_energy)/(double)(ndos-1);
  double *dos = (double*)malloc(sizeof(double)*ndos);
  double *dos_int = (double*)malloc(sizeof(double)*ndos);
  for(int i=0 ; i<ndos ; i++) { dos[i] = dos_int[i] = 0.0; }
  // allocate memory for block vectors
  // BUGFIX: use calloc, not malloc -- these vectors are accumulated with += over all k-points
  // below and were previously read uninitialized (undefined behavior / garbage in decay.out)
  int ndata = sparsity->nrow*nblock*nblock;
  double *density_vec = (double*)calloc(2*ndata,sizeof(double));
  double *response_vec = (double*)calloc(2*ndata,sizeof(double));
  // loop over k-points in each dimension
  double wt = 1.0/(double)(ngrid*ngrid*ngrid); // uniform k-point quadrature weight
  double complex phase[3];
  for(int i=0 ; i<ngrid ; i++)
  {
    phase[0] = I*2.0*M_PI*(double)i/(double)ngrid;
    for(int j=0 ; j<ngrid ; j++)
    {
      phase[1] = I*2.0*M_PI*(double)j/(double)ngrid;
      for(int k=0 ; k<ngrid ; k++)
      {
        phase[2] = I*2.0*M_PI*(double)k/(double)ngrid;
        // construct reciprocal-space Hamiltonian & overlap matrices (Bloch sums over lattice vectors)
        for(int l=0 ; l<nblock*nblock ; l++)
        { hamiltonian_k[l].real = hamiltonian_k[l].imag = overlap_k[l].real = overlap_k[l].imag = 0.0; }
        for(int l=0 ; l<sparsity->col[1] ; l++)
        {
          int ilat = sparsity->row[l];
          double complex phase0 = phase[0]*latvec[3*ilat] + phase[1]*latvec[3*ilat+1] + phase[2]*latvec[3*ilat+2];
          double complex exp_phase0 = cexp(phase0);
          for(int m=0 ; m<nblock*nblock ; m++)
          {
            hamiltonian_k[m].real += creal(exp_phase0)*hamiltonian[l][m];
            hamiltonian_k[m].imag += cimag(exp_phase0)*hamiltonian[l][m];
            overlap_k[m].real += creal(exp_phase0)*overlap[l][m];
            overlap_k[m].imag += cimag(exp_phase0)*overlap[l][m];
          }
        }
        // diagonalize the complex-Hermitian Hamiltonian (LAPACK call)
        zhegv(&itype,&jobz,&uplo,&size,hamiltonian_k,&size,overlap_k,&size,eigenvalue,work,&lwork,rwork,&info);
        if(info != 0) { printf("ERROR: LAPACK zhegv returned an error (%d)\n",info); MPI_Abort(MPI_COMM_WORLD,0); }
        // sparsely accumulate DOS contributions (only bins within 20 kT of each eigenvalue)
        for(int l=0 ; l<size ; l++)
        {
          int min_dos = MAX(0,(int)((eigenvalue[l] - min_energy - 20.0*temperature)/denergy));
          int max_dos = MIN(ndos-1,(int)((eigenvalue[l] - min_energy + 20.0*temperature)/denergy));
          for(int m=min_dos ; m<=max_dos ; m++)
          {
            double dos_energy = min_energy + (double)m*denergy;
            dos_int[m] += wt*2.0*(fermi((eigenvalue[l] - dos_energy)/temperature)
                                  - fermi((eigenvalue[l] - dos_energy + denergy)/temperature));
            dos[m] -= wt*2.0*dfermi_dx((eigenvalue[l] - dos_energy)/temperature)/temperature;
          }
        }
        // construct k-point contribution to real-space density matrix (overlap_k reused as scratch)
        for(int l=0 ; l<nblock ; l++)
        {
          double func = 2.0*fermi((eigenvalue[l] - potential)/temperature);
          for(int m=0 ; m<nblock ; m++)
          {
            overlap_k[m+l*nblock].real = func*hamiltonian_k[m+l*nblock].real;
            overlap_k[m+l*nblock].imag = func*hamiltonian_k[m+l*nblock].imag;
          }
        }
        zgemm(&transa,&transb,&size,&size,&size,&one,hamiltonian_k,&size,overlap_k,&size,&zero,density0,&size);
        // construct k-point contribution to real-space response matrix
        for(int l=0 ; l<nblock ; l++)
        {
          double func = -2.0*eigenvalue[l]*fermi((eigenvalue[l] - potential)/temperature);
          for(int m=0 ; m<nblock ; m++)
          {
            overlap_k[m+l*nblock].real = func*hamiltonian_k[m+l*nblock].real;
            overlap_k[m+l*nblock].imag = func*hamiltonian_k[m+l*nblock].imag;
          }
        }
        zgemm(&transa,&transb,&size,&size,&size,&one,hamiltonian_k,&size,overlap_k,&size,&zero,response0,&size);
        // accumulate contributions to real-space density & response matrices
        for(int l=0 ; l<nblock*nblock ; l++)
        {
          density[0][l] += wt*density0[l].real;
          response[0][l] += wt*response0[l].real;
        }
        for(int l=1 ; l<sparsity->col[1] ; l++)
        {
          int ilat = sparsity->row[l];
          double complex phase0 = phase[0]*latvec[3*ilat] + phase[1]*latvec[3*ilat+1] + phase[2]*latvec[3*ilat+2];
          double complex exp_phase0 = cexp(-phase0);
          for(int m=0 ; m<nblock*nblock ; m++)
          {
            double complex density00 = density0[m].real + I*density0[m].imag;
            double complex response00 = response0[m].real + I*response0[m].imag;
            density[l][m] += wt*creal(exp_phase0*density00);
            response[l][m] += wt*creal(exp_phase0*response00);
            density[sparsity->col[ilat]][m] += wt*creal(conj(exp_phase0)*density00);
            response[sparsity->col[ilat]][m] += wt*creal(conj(exp_phase0)*response00);
          }
        }
        // accumulate contributions to extended real-space density & response matrices (likely bottleneck for intended use)
        #pragma omp parallel for
        for(int l=0 ; l<sparsity->nrow ; l++)
        {
          double complex phase0 = phase[0]*latvec[3*l] + phase[1]*latvec[3*l+1] + phase[2]*latvec[3*l+2];
          double complex exp_phase0 = cexp(-phase0);
          int offset = l*nblock*nblock;
          for(int m=0 ; m<nblock*nblock ; m++)
          {
            double complex density00 = density0[m].real + I*density0[m].imag;
            double complex response00 = response0[m].real + I*response0[m].imag;
            density_vec[offset+m] += wt*creal(exp_phase0*density00);
            response_vec[offset+m] += wt*creal(exp_phase0*response00);
          }
        }
      }
    }
  }
  // print both the DOS and accumulated DOS
  FILE* dos_file = fopen("dos.out","w");
  if(dos_file == NULL) // guard against fprintf on a NULL stream (undefined behavior)
  { printf("ERROR: unable to open dos.out for writing\n"); MPI_Abort(MPI_COMM_WORLD,0); }
  double acc_dos = 0.0;
  for(int i=0 ; i<ndos ; i++)
  {
    acc_dos += dos_int[i];
    double dos_energy = min_energy + (max_energy-min_energy)*(double)i/(double)(ndos-1);
    fprintf(dos_file,"%e %e %e\n",dos_energy*E0,dos[i]/E0,acc_dos);
  }
  fclose(dos_file);
  // calculate the Frobenius-norm of matrix blocks for density & response matrices
  FILE *decay_file = fopen("decay.out","w");
  if(decay_file == NULL)
  { printf("ERROR: unable to open decay.out for writing\n"); MPI_Abort(MPI_COMM_WORLD,0); }
  fprintf(decay_file,"%d\n",sparsity->nrow);
  for(int i=0 ; i<sparsity->nrow ; i++)
  {
    double dist = A0*distance(&(atom[0]),&(atom[3*i])), density_norm, response_norm;
    dot_vec(1,nblock*nblock,&(density_vec[i*nblock*nblock]),&(density_vec[i*nblock*nblock]),&density_norm);
    dot_vec(1,nblock*nblock,&(response_vec[i*nblock*nblock]),&(response_vec[i*nblock*nblock]),&response_norm);
    fprintf(decay_file,"%e %e %e\n",dist,sqrt(density_norm),sqrt(response_norm));
  }
  fclose(decay_file);
  // deallocate memory
  free(response_vec);
  free(density_vec);
  free(dos_int);
  free(dos);
  free(work);
  free(response0);
  free(density0);
  free(overlap_k);
  free(hamiltonian_k);
  free(rwork);
  free(eigenvalue);
}
// test out the effects of localization on Green's function accuracy and CG condition numbers
// (benchmarking harness: prints timings, iteration counts, residuals & condition-number estimates to stdout)
void infinite_pre_tester(int nblock, // matrix block size
                         int nradius, // number of localization radii to test
                         double min_radius, // minimum localization radius
                         double max_radius, // maximum localization radius
                         double res_tol, // desired residual tolerance for convergence
                         double complex z0, // complex energy shift for the iterative solve
                         double complex z1, // complex energy shift for the preconditioner
                         struct pattern *sparsity, // contains the sparsity pattern of the matrices [1]
                         double *atom, // atomic coordinates to define new sparsity patterns [3*sparsity->nrow]
                         int *latvec, // ordered lattice vector list [3*sparsity->nrow]
                         double **hamiltonian, // Hamiltonian matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
                         double **overlap) // overlap matrix [sparsity->col[sparsity->ncol]][nblock*nblock]
{
  // allocate memory for block vectors (factor of 2 packs real & imaginary parts back-to-back)
  int ndata = sparsity->nrow*nblock*nblock;
  double *rhs = (double*)malloc(sizeof(double)*2*ndata);
  double *rhs_real = rhs, *rhs_imag = &(rhs[ndata]);
  double *x0 = (double*)malloc(sizeof(double)*2*ndata);
  double *x0_real = x0, *x0_imag = &(x0[ndata]);
  double *x1 = (double*)malloc(sizeof(double)*2*ndata);
  double *x1_real = x1, *x1_imag = &(x1[ndata]);
  double *x2 = (double*)malloc(sizeof(double)*2*ndata);
  double *work = (double*)malloc(sizeof(double)*8*ndata);
  // right-hand side: identity block on the first atom, zero elsewhere (imaginary part all zero)
  zero_vec(nblock,2*sparsity->nrow,rhs);
  for(int i=0 ; i<nblock ; i++) { rhs[i+i*nblock] = 1.0; }
  // benchmark time for an overlap matrix solve (niter = iteration count returned by spd_inv)
  printf("> iterative overlap inversion\n");
  double time_before = omp_get_wtime();
  copy_vec(nblock,sparsity->nrow,rhs,x0);
  int niter = spd_inv(nblock,sparsity,res_tol,overlap,x0,work);
  double time_after = omp_get_wtime();
  printf(">> number of iterations = %d\n",niter);
  printf(">> time usage = %e s\n",time_after-time_before);
  // prepare the solutions for use in the sparse approximate inverse (avoid double-counting (0,0) matrix block)
  for(int i=0 ; i<nblock*nblock ; i++) { x0[i] *= 0.5; }
  // for each radius, construct a sparse approximate inverse overlap matrix
  for(int i=0 ; i<nradius ; i++)
  {
    // radii are sampled uniformly between min_radius & max_radius
    double x = (double)i/(double)(nradius-1);
    double radius = (1.0 - x)*min_radius + x*max_radius;
    printf("> inverse sparsity radius = %lf\n",A0*radius);
    // allocate memory for block-sparse inverse overlap matrix with its own (radius-dependent) pattern
    struct pattern inv_sparsity;
    neighbor_list(sparsity->nrow,atom,radius,&inv_sparsity);
    int nnz = inv_sparsity.col[inv_sparsity.ncol];
    double **inverse = (double**)malloc(sizeof(double*)*nnz);
    crystal_malloc(nblock,&inv_sparsity,latvec,inverse);
    // setup inverse matrix elements: symmetric col+row scatter of the x0 solution column
    zero_mat(nblock,&inv_sparsity,inverse);
    add_col(nblock,&inv_sparsity,0,1.0,x0,inverse);
    add_row(nblock,&inv_sparsity,0,1.0,x0,inverse);
    // benchmark the time of applying the inverse
    time_before = omp_get_wtime();
    mat_vec(nblock,&inv_sparsity,1.0,0.0,inverse,rhs,x1);
    time_after = omp_get_wtime();
    printf(">> time usage = %e s\n",time_after-time_before);
    // calculate the residual of the inverse: x2 = rhs - overlap*x1
    copy_vec(nblock,sparsity->nrow,rhs,x2);
    mat_vec(nblock,sparsity,-1.0,1.0,overlap,x1,x2);
    // report the worst residual norm over the nblock right-hand-side columns
    double res[NBLOCK_MAX], res_max;
    dot_vec(nblock,sparsity->nrow,x2,x2,res);
    res_max = res[0];
    for(int j=1 ; j<nblock ; j++)
    { if(res[j] > res_max) { res_max = res[j]; } }
    printf(">> residual error = %e\n",sqrt(res_max));
    // deallocate loop memory (inverse[0] anchors the slab from crystal_malloc -- TODO confirm)
    free(inverse[0]);
    free(inverse);
    free_pattern(&inv_sparsity);
  }
  // solve the larger imaginary shift first (z1; no preconditioner -- NULL pattern & inverse args)
  printf("> preconditioner construction\n");
  time_before = omp_get_wtime();
  copy_vec(nblock,2*sparsity->nrow,rhs,x1);
  niter = cgls_inv(nblock,sparsity,NULL,z1,res_tol,hamiltonian,overlap,NULL,NULL,rhs_real,rhs_imag,x1,work);
  time_after = omp_get_wtime();
  printf(">> number of iterations = %d\n",niter);
  printf(">> time usage = %e s\n",time_after-time_before);
  printf(">> operational condition number = %lf\n",condition_number(res_tol,niter));
  // solve the small imaginary shift (z0; unpreconditioned baseline for the comparisons below)
  printf("> unpreconditioned solve\n");
  time_before = omp_get_wtime();
  copy_vec(nblock,2*sparsity->nrow,rhs,x0);
  niter = cgls_inv(nblock,sparsity,NULL,z0,res_tol,hamiltonian,overlap,NULL,NULL,rhs_real,rhs_imag,x0,work);
  time_after = omp_get_wtime();
  printf(">> number of iterations = %d\n",niter);
  printf(">> time usage = %e s\n",time_after-time_before);
  printf(">> operational condition number = %lf\n",condition_number(res_tol,niter));
  // prepare the solutions for use in the preconditioner (avoid double-counting (0,0) matrix block)
  for(int i=0 ; i<nblock*nblock ; i++) { x0_real[i] *= 0.5; x0_imag[i] *= 0.5; x1_real[i] *= 0.5; x1_imag[i] *= 0.5; }
  // for each radius, construct a preconditioner from each solution
  for(int i=0 ; i<nradius ; i++)
  {
    double x = (double)i/(double)(nradius-1);
    double radius = (1.0 - x)*min_radius + x*max_radius;
    printf("> preconditioner radius = %lf\n",A0*radius);
    // allocate memory for block-sparse shifted inverse matrix (real & imaginary parts stored separately)
    struct pattern pre_sparsity;
    neighbor_list(sparsity->nrow,atom,radius,&pre_sparsity);
    int nnz = pre_sparsity.col[pre_sparsity.ncol];
    double **inverse_real = (double**)malloc(sizeof(double*)*nnz);
    double **inverse_imag = (double**)malloc(sizeof(double*)*nnz);
    crystal_malloc(nblock,&pre_sparsity,latvec,inverse_real);
    crystal_malloc(nblock,&pre_sparsity,latvec,inverse_imag);
    // setup shifted preconditioner (built from the z1-shifted solution x1)
    zero_mat(nblock,&pre_sparsity,inverse_real);
    zero_mat(nblock,&pre_sparsity,inverse_imag);
    add_col(nblock,&pre_sparsity,0,1.0,x1_real,inverse_real);
    add_row(nblock,&pre_sparsity,0,1.0,x1_real,inverse_real);
    add_col(nblock,&pre_sparsity,0,1.0,x1_imag,inverse_imag);
    add_row(nblock,&pre_sparsity,0,1.0,x1_imag,inverse_imag);
    time_before = omp_get_wtime();
    copy_vec(nblock,2*sparsity->nrow,rhs,x2);
    niter = cgls_inv(nblock,sparsity,&pre_sparsity,z0,res_tol,hamiltonian,overlap,inverse_real,inverse_imag,rhs_real,rhs_imag,
                     x2,work);
    time_after = omp_get_wtime();
    printf(">> number of iterations (shifted) = %d\n",niter);
    printf(">> time usage (shifted) = %e s\n",time_after-time_before);
    printf(">> operational condition number (shifted) = %lf\n",condition_number(res_tol,niter));
    // setup self preconditioner (built from the z0-shifted solution x0 itself)
    zero_mat(nblock,&pre_sparsity,inverse_real);
    zero_mat(nblock,&pre_sparsity,inverse_imag);
    add_col(nblock,&pre_sparsity,0,1.0,x0_real,inverse_real);
    add_row(nblock,&pre_sparsity,0,1.0,x0_real,inverse_real);
    add_col(nblock,&pre_sparsity,0,1.0,x0_imag,inverse_imag);
    add_row(nblock,&pre_sparsity,0,1.0,x0_imag,inverse_imag);
    time_before = omp_get_wtime();
    copy_vec(nblock,2*sparsity->nrow,rhs,x2);
    niter = cgls_inv(nblock,sparsity,&pre_sparsity,z0,res_tol,hamiltonian,overlap,inverse_real,inverse_imag,rhs_real,rhs_imag,
                     x2,work);
    time_after = omp_get_wtime();
    printf(">> number of iterations (self) = %d\n",niter);
    printf(">> time usage (self) = %e s\n",time_after-time_before);
    printf(">> operational condition number (self) = %lf\n",condition_number(res_tol,niter));
    // deallocate loop memory
    free(inverse_imag[0]);
    free(inverse_real[0]);
    free(inverse_imag);
    free(inverse_real);
    free_pattern(&pre_sparsity);
  }
  // deallocate local memory
  free(work);
  free(x2);
  free(x1);
  free(x0);
  free(rhs);
}
//=========//
// 9. MAIN //
//=========//
int main(int argc, char** argv)
{
// MPI initialization
int mpirank, mpisize;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&mpirank);
MPI_Comm_size(MPI_COMM_WORLD,&mpisize);
// the vast majority of the program is performed by mpirank == 0 only
if(mpirank == 0)
{
// initial timing point
double time1 = omp_get_wtime();
// parse command-line input
int solver, natom, napprox, nsample, seed;
double temperature, potential, res_tol, pre_radius = 0.0, local_radius = 0.0;
// check for an appropriate number of command-line arguments
if(argc < 5)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <solver parameters ...>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
// read the solver-independent input variables
sscanf(argv[2],"%lf",&potential);
sscanf(argv[3],"%lf",&temperature);
sscanf(argv[4],"%d",&solver);
// parse the atomic structure file
FILE *structure_file = fopen(argv[1],"r");
if(structure_file == NULL)
{ printf("ERROR: %s structure file not found\n",argv[1]); MPI_Abort(MPI_COMM_WORLD,0); }
fscanf(structure_file,"%d",&natom);
double *atom = (double*)malloc(sizeof(double)*3*natom);
for(int i=0 ; i<natom ; i++)
{
char element[16];
fscanf(structure_file,"%s",element);
if(strcmp(element,"Cu"))
{ printf("ERROR: Only element available is Cu (%s != Cu)\n",element); MPI_Abort(MPI_COMM_WORLD,0); }
for(int j=0 ; j<3 ; j++) { fscanf(structure_file,"%lf",&(atom[j+i*3])); }
if(feof(structure_file))
{ printf("ERROR: Not enough atoms in %s\n",argv[1]); MPI_Abort(MPI_COMM_WORLD,0); }
}
fclose(structure_file);
if((solver == 9 || solver == 10 || solver - 1) && natom < 4)
{ printf("ERROR: solver = 9 needs 4 atomic coordinates to define crystal lattice vectors\n"); MPI_Abort(MPI_COMM_WORLD,0); }
// only PEXSI actually uses multiple MPI processes, everything else should have one
if(solver != 2 && mpisize > 1)
{ printf("ERROR: only one MPI process should be used for solver != 2\n"); MPI_Abort(MPI_COMM_WORLD,0); }
// read the solver-dependent input variables
switch(solver)
{
case 0:
case 1:
{
// no solver parameters
} break;
case 2:
{
if(argc < 6)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <#/2 of poles>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
} break;
case 3:
{
if(argc < 7)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <# of Cheby.> "
"<res. tol.>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&res_tol);
} break;
case 4:
{
if(argc < 7)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <#/2 of poles> "
"<res. tol.>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&res_tol);
} break;
case 5:
{
if(argc < 8)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <# of Cheby.> <res. tol.> "
"<loc. rad.>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&res_tol);
sscanf(argv[7],"%lf",&local_radius);
} break;
case 6:
case 9:
{
if(argc < 8)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <#/2 of poles> <res. tol.> "
"<loc. rad.>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&res_tol);
sscanf(argv[7],"%lf",&local_radius);
} break;
case 7:
{
if(argc < 10)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <# of Cheby.> <res. tol.> "
"<loc. rad.> <seed> <# of samples>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&res_tol);
sscanf(argv[7],"%lf",&local_radius);
sscanf(argv[8],"%d",&seed);
sscanf(argv[9],"%d",&nsample);
if(seed <= 0)
{ printf("ERROR: PRNG seed must have positive nonzero value\n"); MPI_Abort(MPI_COMM_WORLD,0); }
} break;
case 8:
{
if(argc < 10)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <#/2 of poles> <res. tol.> "
"<loc. rad.> <seed> <# of samples>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&res_tol);
sscanf(argv[7],"%lf",&local_radius);
sscanf(argv[8],"%d",&seed);
sscanf(argv[9],"%d",&nsample);
if(seed <= 0)
{ printf("ERROR: PRNG seed must have positive nonzero value\n"); MPI_Abort(MPI_COMM_WORLD,0); }
} break;
case 10:
{
if(argc < 7)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> "
"<# of k-grid pts. per dimension> <loc. rad.>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%d",&napprox);
sscanf(argv[6],"%lf",&local_radius);
} break;
case -1:
{
if(argc < 10)
{
printf("USAGE: <executable> <structure file> <chemical potential> <temperature> <solver> <res. tol.> "
"<min. rad.> <max. rad.> <# rad.>\n");
MPI_Abort(MPI_COMM_WORLD,0);
}
sscanf(argv[5],"%lf",&res_tol);
sscanf(argv[6],"%lf",&pre_radius);
sscanf(argv[7],"%lf",&local_radius);
sscanf(argv[8],"%d",&nsample);
} break;
default:
{
printf("ERROR: unknown solver, %d is not contained in { -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }\n",solver);
MPI_Abort(MPI_COMM_WORLD,0);
}
}
// convert from eV/Ang to Ry/Bohr
potential /= E0;
temperature /= E0;
for(int i=0 ; i<3*natom ; i++) { atom[i] /= A0; }
pre_radius /= A0;
local_radius /= A0;
// hard-coded tight-binding parameters for copper
struct nrl_tb param = define_copper();
// setup the ordered list of relevant lattice vectors
int *latvec;
double volume;
if(solver == 9 || solver == 10 || solver == -1)
{
if(local_radius < param.Rcut)
{
printf("WARNING: local radius is too small & truncates central-cell Hamiltonian (increased to %lf)\n",param.Rcut*A0);
local_radius = param.Rcut;
}
volume = cell_volume(atom);
natom = latvec_list(local_radius,&latvec,&atom);
printf("# of active atoms in crystal = %d\n",natom);
}
else
{ printf("# of atoms = %d\n",natom); }
// setup the block-sparse Hamiltonian, overlap, density, & response matrices
struct pattern sparsity;
neighbor_list(natom,atom,param.Rcut,&sparsity);
int nblock = 9, nnz = sparsity.col[sparsity.ncol];
double **hamiltonian = (double**)malloc(sizeof(double*)*nnz);
double **overlap = (double**)malloc(sizeof(double*)*nnz);
double **density = (double**)malloc(sizeof(double*)*nnz);
double **response = (double**)malloc(sizeof(double*)*nnz);
if( solver == 9 || solver == 10 || solver == -1 )
{
crystal_malloc(nblock,&sparsity,latvec,hamiltonian);
crystal_malloc(nblock,&sparsity,latvec,overlap);
crystal_malloc(nblock,&sparsity,latvec,density);
crystal_malloc(nblock,&sparsity,latvec,response);
// fill in only the first column explicitly (to avoid recomputing redundant matrix elements)
sparsity.ncol = 1;
tb_matrix(natom,atom,¶m,&sparsity,hamiltonian,overlap);
sparsity.ncol = natom;
// transposed copy to the first row, stored in memory after the first column
int nmem = nblock*nblock*(sparsity.col[1]-1);
for(int i=1 ; i<sparsity.col[1] ; i++)
{
for(int j=0 ; j<nblock ; j++)
for(int k=0 ; k<nblock ; k++)
{
hamiltonian[i][nmem+k+j*nblock] = hamiltonian[i][j+k*nblock];
overlap[i][nmem+k+j*nblock] = overlap[i][j+k*nblock];
}
}
}
else
{
hamiltonian[0] = (double*)malloc(sizeof(double)*nnz*nblock*nblock);
overlap[0] = (double*)malloc(sizeof(double)*nnz*nblock*nblock);
density[0] = (double*)malloc(sizeof(double)*nnz*nblock*nblock);
response[0] = (double*)malloc(sizeof(double)*nnz*nblock*nblock);
for(int i=1 ; i<nnz ; i++)
{
hamiltonian[i] = &(hamiltonian[i-1][nblock*nblock]);
overlap[i] = &(overlap[i-1][nblock*nblock]);
density[i] = &(density[i-1][nblock*nblock]);
response[i] = &(response[i-1][nblock*nblock]);
}
tb_matrix(natom,atom,¶m,&sparsity,hamiltonian,overlap);
}
printf("H & S sparsity = %lf\n",(double)sparsity.col[sparsity.ncol]/(double)sparsity.ncol);
// setup the sparsity pattern for a local region
struct pattern locality;
if(solver == 5 || solver == 6)
{
neighbor_list(natom,atom,local_radius,&locality);
printf("local sparsity = %lf\n",(double)locality.col[locality.ncol]/(double)locality.ncol);
}
// greedy coloring of adjacency matrix to define uncorrelated complex rotor ensemble
int ncolor, *color, *atom_ptr;
if(solver == 7 || solver == 8)
{
neighbor_list(natom,atom,local_radius,&locality);
color_graph(&locality,&ncolor,&color,&atom_ptr);
printf("total number of atom colors = %d\n",ncolor);
free_pattern(&locality);
}
// reasonable energy interval based on bulk copper calculations in the Julich tight-binding code
double min_energy = -10.0/E0, max_energy = 32.0/E0, approximation_error = 0.0;
double *pcoeff;
double complex *w, *z;
// fit the Fermi-Dirac polynomial approximation (Chebyshev interpolation then truncation)
double hwt = 2.0/(max_energy - min_energy);
double owt = -(max_energy + min_energy)/(max_energy - min_energy);
if(solver == 3 || solver == 5 || solver == 7)
{
pcoeff = (double*)malloc(sizeof(double)*napprox);
approximation_error = polynomial_approximation(napprox,min_energy,max_energy,potential,temperature,pcoeff);
for(int i=0 ; i<napprox ; i++) { pcoeff[i] *= 2.0; } // spin degeneracy factor
printf("approximation error (%d Chebyshev polynomials) = %e\n",napprox,2.0*approximation_error);
}
// parse the Fermi-Dirac rational approximation table (read from a precomputed table of approximations)
if(solver == 2 || solver == 4 || solver == 6 || solver == 8 || solver == 9)
{
w = (double complex*)malloc(sizeof(double complex)*napprox);
z = (double complex*)malloc(sizeof(double complex)*napprox);
approximation_error = rational_approximation(napprox,min_energy,potential,temperature,w,z);
for(int i=0 ; i<napprox ; i++) { w[i] *= 2.0; } // spin degeneracy factor
printf("approximation error (%d pole pairs) = %e\n",napprox,2.0*approximation_error);
}
// solver-dependent inner loop
switch(solver)
{
// no solver: fill density & response w/ overlap
case 0:
{
printf("no solver (pre-processing & post-processing only)\n");
copy_mat(nblock,&sparsity,overlap,density);
copy_mat(nblock,&sparsity,overlap,response);
} break;
// reference solver: dense matrix diagonalization
case 1:
{
printf("LAPACK solver\n");
dense_solver(nblock,potential,temperature,min_energy,max_energy,&sparsity,hamiltonian,overlap,density,response);
} break;
// PEXSI-based rational-approximation solver (quadratic scaling in 3D)
// NOTE: only mpirank == 0 enters the PEXSI solver here, the other ranks enter near the end of main
case 2:
{
printf("PEXSI solver\n");
PEXSI_solver(mpirank,mpisize,nblock,napprox,w,z,&sparsity,hamiltonian,overlap,density,response);
} break;
// quadratic-scaling polynomial-approximation solver
case 3:
{
printf("polynomial solver\n");
quad_poly_solver(nblock,napprox,res_tol,hwt,owt,pcoeff,&sparsity,hamiltonian,overlap,density,response);
} break;
// quadratic-scaling rational-approximation solver
case 4:
{
printf("rational solver\n");
quad_rational_solver(nblock,napprox,res_tol,w,z,&sparsity,hamiltonian,overlap,density,response);
} break;
// local polynomial-approximation solver
case 5:
{
printf("localized polynomial solver\n");
local_poly_solver(nblock,napprox,res_tol,hwt,owt,pcoeff,&locality,&sparsity,hamiltonian,overlap,density,response);
} break;
// local rational-approximation solver
case 6:
{
printf("localized rational solver\n");
local_rational_solver(nblock,napprox,res_tol,w,z,&locality,&sparsity,hamiltonian,overlap,density,response);
} break;
// random polynomial-approximation solver
case 7:
{
printf("randomized polynomial solver\n");
random_poly_solver(nblock,napprox,ncolor,nsample,seed,res_tol,hwt,owt,pcoeff,color,atom_ptr,&sparsity,hamiltonian,
overlap,density,response);
} break;
// random rational-approximation solver
case 8:
{
printf("randomized rational solver\n");
random_rational_solver(nblock,napprox,ncolor,nsample,seed,res_tol,w,z,color,atom_ptr,&sparsity,hamiltonian,overlap,
density,response);
} break;
// infinite rational-approximation solver
case 9:
{
printf("infinite rational solver\n");
infinite_rational_solver(nblock,napprox,res_tol,atom,w,z,&sparsity,hamiltonian,overlap,density,response);
} break;
// infinite reciprocal-space solver
case 10:
{
printf("infinite k-space solver\n");
infinite_reciprocal_solver(nblock,potential,temperature,min_energy,max_energy,napprox,latvec,atom,&sparsity,hamiltonian,
overlap,density,response);
} break;
// infinite localization tester
case -1:
{
printf("infinite preconditioning tester\n");
double complex z0 = potential + I*M_PI*temperature; // first Matsubara pole
double complex z1 = potential + I*3.0*M_PI*temperature; // second Matsubara pole
infinite_pre_tester(nblock,nsample,pre_radius,local_radius,res_tol,z0,z1,&sparsity,atom,latvec,hamiltonian,overlap);
} break;
default:
{
printf("ERROR: unknown solver, %d is not contained in { -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }\n",solver);
MPI_Abort(MPI_COMM_WORLD,0);
}
}
// solver-independent energy, number, & force calculations on block density & response matrices
if(solver == 9 || solver == 10) { sparsity.ncol = 1; } // observable contributions only from the central cell of crystals
double number = dot_mat(nblock,&sparsity,density,overlap);
double energy = dot_mat(nblock,&sparsity,density,hamiltonian);
double *force = (double*)malloc(sizeof(double)*3*natom);
double *hblock_force = (double*)malloc(sizeof(double)*3*nblock*nblock);
double *oblock_force = (double*)malloc(sizeof(double)*3*nblock*nblock);
for(int i=0 ; i<3*natom ; i++) { force[i] = 0.0; }
for(int i=0 ; i<sparsity.ncol ; i++)
for(int j=sparsity.col[i] ; j<sparsity.col[i+1] ; j++)
{
// diagonal force contributions
if(i == sparsity.row[j])
{
// contributes to all terms that are neighbors of atom i
for(int k=sparsity.col[i] ; k<sparsity.col[i+1] ; k++)
{
tb_diagonal_force(i,sparsity.row[k],natom,atom,sparsity.col[i+1]-sparsity.col[i],
&(sparsity.row[sparsity.col[i]]),¶m,hblock_force);
for(int l=0 ; l<3 ; l++)
for(int m=0 ; m<nblock*nblock ; m++)
{ force[l+sparsity.row[k]*3] += density[j][m]*hblock_force[m+l*nblock*nblock]; }
}
}
else // off-diagonal force contributions (general case not assuming numerical symmetry of density & response matrices)
{
tb_offdiagonal_force(i,sparsity.row[j],natom,atom,¶m,hblock_force,oblock_force);
for(int k=0 ; k<3 ; k++)
for(int l=0 ; l<nblock*nblock ; l++)
{
force[k+i*3] += density[j][l]*hblock_force[l+k*nblock*nblock] + response[j][l]*oblock_force[l+k*nblock*nblock];
}
tb_offdiagonal_force(sparsity.row[j],i,natom,atom,¶m,hblock_force,oblock_force);
for(int k=0 ; k<3 ; k++)
for(int l=0 ; l<nblock ; l++)
for(int m=0 ; m<nblock ; m++)
{
force[k+sparsity.row[j]*3] += density[j][m+l*nblock]*hblock_force[l+(m+k*nblock)*nblock]
+ response[j][m+l*nblock]*oblock_force[l+(m+k*nblock)*nblock];
}
}
}
// physical outputs
if(solver != 0 && solver != -1)
{
printf("number = %16.16e\n",number);
printf("energy = %16.16e\n",energy*E0);
}
if(solver == 9 || solver == 10)
{
double force0[3], stress[9];
for(int i=0 ; i<3 ; i++) { force0[i] = 0.0; }
for(int i=0 ; i<9 ; i++) { stress[i] = 0.0; }
for(int i=0 ; i<natom ; i++)
{
for(int j=0 ; j<3 ; j++)
{ force0[j] += force[j+i*3]; }
for(int j=0 ; j<3 ; j++)
for(int k=0 ; k<3 ; k++)
{ stress[k+j*3] += atom[k+i*3]*force[j+i*3]; }
}
for(int i=0 ; i<9 ; i++) { stress[i] /= volume; }
printf("force = { %16.16e , %16.16e , %16.16e }\n",force0[0]*E0/A0,force0[1]*E0/A0,force0[2]*E0/A0);
printf("stress = { %16.16e , %16.16e , %16.16e }\n",stress[0]*P0,stress[1]*P0,stress[2]*P0);
printf(" { %16.16e , %16.16e , %16.16e }\n",stress[3]*P0,stress[4]*P0,stress[5]*P0);
printf(" { %16.16e , %16.16e , %16.16e }\n",stress[6]*P0,stress[7]*P0,stress[8]*P0);
}
else if(solver != 0 && solver != -1)
{
for(int i=0 ; i<natom ; i++)
{ printf("force[%d] = { %16.16e , %16.16e , %16.16e }\n",i,force[0+3*i]*E0/A0,force[1+3*i]*E0/A0,force[2+3*i]*E0/A0); }
}
// final timing point
double time2 = omp_get_wtime();
printf("total time usage = %e s\n",time2-time1);
// print density & response matrices to a debug file (1st block column only for periodic systems)
if(solver != 0 && solver != -1)
{
FILE *debug_file = fopen("debug.out","w");
fprintf(debug_file,"%d\n",sparsity.col[sparsity.ncol]*nblock*nblock);
int index = 0;
for(int i=0 ; i<sparsity.col[sparsity.ncol] ; i++)
{
for(int j=0 ; j<nblock*nblock ; j++)
{
fprintf(debug_file,"%d %16.16e %16.16e\n",index++,density[i][j],response[i][j]);
}
}
fclose(debug_file);
}
// deallocate remaining memory
free(oblock_force);
free(hblock_force);
free(force);
if(solver == 2 || solver == 4 || solver == 6 || solver == 8)
{ free(w); free(z); }
if(solver == 3 || solver == 5 || solver == 7)
{ free(pcoeff); }
if(solver == 7 || solver == 8)
{ free(color); free(atom_ptr); }
if(solver == 5 || solver == 6)
{ free_pattern(&locality); }
free(response[0]); free(response);
free(density[0]); free(density);
free(overlap[0]); free(overlap);
free(hamiltonian[0]); free(hamiltonian);
free_pattern(&sparsity);
if(solver == 9 || solver == 10 || solver == -1)
{ free(latvec); }
free(atom);
}
else // mpirank != 0 branch of the main program
{
// PEXSI-based rational-approximation solver (quadratic scaling in 3D)
// NOTE: mpirank != 0 enter the PEXSI solver here, mpirank == 0 enters above
PEXSI_solver(mpirank,mpisize,0,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
}
// total system resource usage statistics
struct rusage my_usage;
long int global_memory;
getrusage(RUSAGE_SELF,&my_usage);
MPI_Reduce(&(my_usage.ru_maxrss),&global_memory,1,MPI_LONG,MPI_SUM,0,MPI_COMM_WORLD);
if(mpirank == 0)
{ printf("total memory usage = %ld kb\n",global_memory); }
// normal MPI termination
MPI_Finalize();
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////
////////////////////////////////
////////////////
////////
////
//
|
hypre_hopscotch_hash.h | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/**
* Hopscotch hash is modified from the code downloaded from
* https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing
* with the following terms of usage
*/
////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//------------------------------------------------------------------------------
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose is hereby granted without fee,
// provided that due acknowledgments to the authors are provided and
// this permission notice appears in all copies of the software.
// The software is provided "as is". There is no warranty of any kind.
//
//Authors:
// Maurice Herlihy
// Brown University
// and
// Nir Shavit
// Tel-Aviv University
// and
// Moran Tzafrir
// Tel-Aviv University
//
// Date: July 15, 2008.
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
// Modified : Jongsoo Park (jongsoo.park@intel.com)
// Oct 1, 2015.
//
////////////////////////////////////////////////////////////////////////////////
#ifndef hypre_HOPSCOTCH_HASH_HEADER
#define hypre_HOPSCOTCH_HASH_HEADER
//#include <strings.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <math.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#include "_hypre_utilities.h"
// Potentially architecture specific features used here:
// __sync_val_compare_and_swap
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* This next section of code is here instead of in _hypre_utilities.h to get
* around some portability issues with Visual Studio. By putting it here, we
* can explicitly include this '.h' file in a few files in hypre and compile
* them with C++ instead of C (VS does not support C99 'inline').
******************************************************************************/
#ifdef HYPRE_USING_ATOMIC
/**
 * Atomic compare-and-swap: if *ptr == oldval, store newval into *ptr.
 * Returns the value of *ptr observed before the operation, so a return
 * value equal to oldval means the swap took place.
 *
 * NOTE(review): if none of the #if branches matches (a non-GCC compiler
 * without the __sync builtins), the body is empty and falling off the end
 * of a value-returning function is undefined behavior — confirm such
 * configurations are excluded by the build system.
 */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   /* GCC >= 4.1.1: full-barrier compare-and-swap builtin. */
   return __sync_val_compare_and_swap(ptr, oldval, newval);
//#elif defined _MSC_VER
//return _InterlockedCompareExchange((long *)ptr, newval, oldval);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//atomic_compare_exchange_strong(atomic_ptr, &oldval, newval);
//return oldval;
#endif
}
/**
 * Atomic fetch-and-add: adds value to *ptr and returns the value *ptr
 * held immediately before the addition.
 *
 * NOTE(review): as with hypre_compare_and_swap above, an unmatched #if
 * leaves the function without a return statement (undefined behavior) —
 * confirm the build only selects this path on GCC-compatible compilers.
 */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   /* GCC >= 4.1.1: full-barrier fetch-and-add builtin. */
   return __sync_fetch_and_add(ptr, value);
//#elif defined _MSC_VER
//return _InterlockedExchangeAdd((long *)ptr, value);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//return atomic_fetch_add(atomic_ptr, value);
#endif
}
#else // !HYPRE_USING_ATOMIC
/**
 * Sequential (non-atomic) compare-and-swap fallback used when
 * HYPRE_USING_ATOMIC is not defined: if *ptr == oldval, store newval.
 * Always returns the value *ptr held before the call, matching the
 * contract of the atomic variant above.
 */
static inline HYPRE_Int
hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
   HYPRE_Int observed = *ptr;
   if (observed == oldval)
   {
      *ptr = newval;
   }
   return observed;
}
/**
 * Sequential (non-atomic) fetch-and-add fallback used when
 * HYPRE_USING_ATOMIC is not defined: adds value to *ptr and returns the
 * previous contents of *ptr.
 */
static inline HYPRE_Int
hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
   HYPRE_Int previous = *ptr;
   *ptr = previous + value;
   return previous;
}
#endif // !HYPRE_USING_ATOMIC
/******************************************************************************/
// Constants ................................................................
#define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32)
#define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024)
#define HYPRE_HOPSCOTCH_HASH_EMPTY (0)
#define HYPRE_HOPSCOTCH_HASH_BUSY (1)
// Small Utilities ..........................................................
/**
 * Zero-based index of the least-significant set bit of x, or -1 when
 * x == 0. Used to walk the hopInfo occupancy bitmaps.
 */
static inline HYPRE_Int
first_lsb_bit_indx( hypre_uint x )
{
#if defined(_MSC_VER)
   /* MSVC has no ffs(); emulate its 1-based result by shifting. */
   HYPRE_Int pos = 0;
   if (x != 0)
   {
      pos = 1;
      while (!(x & 1))
      {
         x >>= 1;
         ++pos;
      }
   }
   return pos - 1;
#else
   /* ffs() returns the 1-based position of the lowest set bit, 0 for 0. */
   return ffs(x) - 1;
#endif
}
/**
* hypre_Hash is adapted from xxHash with the following license.
*/
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/***************************************
* Constants
***************************************/
#define HYPRE_XXH_PRIME32_1 2654435761U
#define HYPRE_XXH_PRIME32_2 2246822519U
#define HYPRE_XXH_PRIME32_3 3266489917U
#define HYPRE_XXH_PRIME32_4 668265263U
#define HYPRE_XXH_PRIME32_5 374761393U
#define HYPRE_XXH_PRIME64_1 11400714785074694791ULL
#define HYPRE_XXH_PRIME64_2 14029467366897019727ULL
#define HYPRE_XXH_PRIME64_3 1609587929392839161ULL
#define HYPRE_XXH_PRIME64_4 9650029242287828579ULL
#define HYPRE_XXH_PRIME64_5 2870177450012600261ULL
#define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT)
/**
 * xxHash64-style scramble of a single HYPRE_BigInt key (big/mixed-int
 * build). The debug block guards against producing the reserved
 * HYPRE_HOPSCOTCH_HASH_EMPTY value, which marks empty buckets.
 */
static inline HYPRE_BigInt
hypre_BigHash( HYPRE_BigInt input )
{
   hypre_ulongint lane = input;
   hypre_ulongint acc  = HYPRE_XXH_PRIME64_5 + sizeof(input);

   /* Mix the single 64-bit lane into the accumulator. */
   lane *= HYPRE_XXH_PRIME64_2;
   lane  = HYPRE_XXH_rotl64(lane, 31);
   lane *= HYPRE_XXH_PRIME64_1;
   acc  ^= lane;
   acc   = HYPRE_XXH_rotl64(acc, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;

   /* Final avalanche. */
   acc ^= acc >> 33;
   acc *= HYPRE_XXH_PRIME64_2;
   acc ^= acc >> 29;
   acc *= HYPRE_XXH_PRIME64_3;
   acc ^= acc >> 32;
#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == acc) {
      hypre_printf("hash(%lld) = %d\n", acc, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != acc);
   }
#endif
   return acc;
}
#else
/**
 * xxHash32-style scramble of a single HYPRE_Int key (default-int build).
 * The input offset keeps any non-negative key from hashing to the
 * reserved HYPRE_HOPSCOTCH_HASH_EMPTY bucket marker.
 */
static inline HYPRE_Int
hypre_BigHash(HYPRE_Int input)
{
   hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);
   // 1665863975 is added to input so that
   // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
   // Hence, we're fine as long as key is non-negative.
   //
   // Fix: perform the offset in unsigned arithmetic. The previous
   // "(input + 1665863975)" added in signed HYPRE_Int, which overflows
   // (undefined behavior) for large keys; the unsigned form wraps and
   // produces the identical 32-bit result when no overflow occurs.
   h32 += ((hypre_uint)input + 1665863975U)*HYPRE_XXH_PRIME32_3;
   h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4;
   /* Final avalanche. */
   h32 ^= h32 >> 15;
   h32 *= HYPRE_XXH_PRIME32_2;
   h32 ^= h32 >> 13;
   h32 *= HYPRE_XXH_PRIME32_3;
   h32 ^= h32 >> 16;
   //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
   return h32;
}
#endif
#ifdef HYPRE_BIGINT
/**
 * xxHash64-style scramble of a single HYPRE_Int key (big-int build).
 * Identical mixing schedule to hypre_BigHash above; the debug block
 * guards against producing the reserved HYPRE_HOPSCOTCH_HASH_EMPTY value.
 */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
   hypre_ulongint lane = input;
   hypre_ulongint acc  = HYPRE_XXH_PRIME64_5 + sizeof(input);

   /* Mix the single 64-bit lane into the accumulator. */
   lane *= HYPRE_XXH_PRIME64_2;
   lane  = HYPRE_XXH_rotl64(lane, 31);
   lane *= HYPRE_XXH_PRIME64_1;
   acc  ^= lane;
   acc   = HYPRE_XXH_rotl64(acc, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;

   /* Final avalanche. */
   acc ^= acc >> 33;
   acc *= HYPRE_XXH_PRIME64_2;
   acc ^= acc >> 29;
   acc *= HYPRE_XXH_PRIME64_3;
   acc ^= acc >> 32;
#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == acc) {
      hypre_printf("hash(%lld) = %d\n", acc, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != acc);
   }
#endif
   return acc;
}
#else
/**
 * xxHash32-style scramble of a single HYPRE_Int key (default build).
 * The input offset keeps any non-negative key from hashing to the
 * reserved HYPRE_HOPSCOTCH_HASH_EMPTY bucket marker.
 */
static inline HYPRE_Int
hypre_Hash(HYPRE_Int input)
{
   hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);
   // 1665863975 is added to input so that
   // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
   // Hence, we're fine as long as key is non-negative.
   //
   // Fix: perform the offset in unsigned arithmetic. The previous
   // "(input + 1665863975)" added in signed HYPRE_Int, which overflows
   // (undefined behavior) for large keys; the unsigned form wraps and
   // produces the identical 32-bit result when no overflow occurs.
   h32 += ((hypre_uint)input + 1665863975U)*HYPRE_XXH_PRIME32_3;
   h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4;
   /* Final avalanche. */
   h32 ^= h32 >> 15;
   h32 *= HYPRE_XXH_PRIME32_2;
   h32 ^= h32 >> 13;
   h32 *= HYPRE_XXH_PRIME32_3;
   h32 ^= h32 >> 16;
   //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
   return h32;
}
#endif
/**
 * Hopscotch relocation helper for hypre_UnorderedIntSet.
 *
 * Given a free bucket that lies too far from a key's home bucket, try to
 * pull the free slot closer: scan the HYPRE_HOPSCOTCH_HASH_HOP_RANGE-1
 * buckets preceding *free_bucket (farthest first) for an occupied slot
 * whose home bucket's hop window also covers *free_bucket. If one is
 * found, its key/hash are copied into the free slot and its own slot
 * becomes the new free bucket.
 *
 * On success *free_bucket / *free_dist are updated and the function
 * returns after one move; on failure *free_bucket = -1, *free_dist = 0.
 *
 * NOTE(review): move_bucket starts at *free_bucket - (HOP_RANGE-1); this
 * assumes the table allocation guarantees that index is valid — confirm
 * against hypre_UnorderedIntSetCreate.
 */
static inline void
hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           HYPRE_Int *free_bucket,
                                           HYPRE_Int *free_dist )
{
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      /* Snapshot of the candidate's hop bitmap; re-validated under the
         segment lock before committing the move. */
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* First occupied slot of move_bucket's window that sits strictly
         before the current free slot. */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* Lock the segment owning move_bucket unless the caller already
            holds it (start_seg). */
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if(start_seg != move_segment)
            omp_set_lock(&move_segment->lock);
#endif
         /* Abandon this candidate if another thread changed the window
            since the snapshot. */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket] = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* Bump the segment timestamp (and flush) so lock-free readers
               — see hypre_UnorderedIntSetContains — can detect the move. */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* Publish the copy first, then retire the old slot. */
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if(start_seg != move_segment)
               omp_unset_lock(&move_segment->lock);
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if(start_seg != move_segment)
            omp_unset_lock(&move_segment->lock);
#endif
      }
      ++move_bucket;
   }
   /* No relocatable candidate found. */
   *free_bucket = -1;
   *free_dist = 0;
}
/**
 * Hopscotch relocation helper for hypre_UnorderedBigIntSet; identical
 * algorithm to hypre_UnorderedIntSetFindCloserFreeBucket but operating
 * on the big-int set's key/hash arrays.
 *
 * On success *free_bucket / *free_dist are updated to a closer free slot;
 * on failure *free_bucket = -1 and *free_dist = 0.
 */
static inline void
hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              HYPRE_Int *free_bucket,
                                              HYPRE_Int *free_dist )
{
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      /* Snapshot of the candidate's hop bitmap; re-validated under the
         segment lock before committing the move. */
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* First occupied slot of move_bucket's window that sits strictly
         before the current free slot. */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* Lock the segment owning move_bucket unless the caller already
            holds it (start_seg). */
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if(start_seg != move_segment)
            omp_set_lock(&move_segment->lock);
#endif
         /* Abandon this candidate if another thread changed the window
            since the snapshot. */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket] = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* Bump the segment timestamp (and flush) so lock-free readers
               can detect the move. */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* Publish the copy first, then retire the old slot. */
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if(start_seg != move_segment)
               omp_unset_lock(&move_segment->lock);
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if(start_seg != move_segment)
            omp_unset_lock(&move_segment->lock);
#endif
      }
      ++move_bucket;
   }
   /* No relocatable candidate found. */
   *free_bucket = -1;
   *free_dist = 0;
}
/**
 * Hopscotch relocation helper for hypre_UnorderedIntMap. Same algorithm
 * as the set variants, but buckets are struct-based (hypre_HopscotchBucket)
 * and the relocation also copies the mapped data field.
 *
 * On success *free_bucket points at a closer free bucket and *free_dist
 * is reduced accordingly; on failure *free_bucket = NULL, *free_dist = 0.
 */
static inline void
hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           hypre_HopscotchBucket **free_bucket,
                                           HYPRE_Int *free_dist)
{
   hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      /* Snapshot of the candidate's hop bitmap; re-validated under the
         segment lock before committing the move. */
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* First occupied slot of move_bucket's window that sits strictly
         before the current free slot. */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* Segment index is derived from the bucket's offset in the table;
            lock it unless the caller already holds it (start_seg). */
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
            omp_set_lock(&move_segment->lock);
#endif
         /* Abandon this candidate if another thread changed the window
            since the snapshot. */
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* Bump the segment timestamp (and flush) so lock-free readers
               can detect the move. */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* Publish the copy first, then retire the old slot. */
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if(start_seg != move_segment)
               omp_unset_lock(&move_segment->lock);
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if(start_seg != move_segment)
            omp_unset_lock(&move_segment->lock);
#endif
      }
      ++move_bucket;
   }
   /* No relocatable candidate found. */
   *free_bucket = NULL;
   *free_dist = 0;
}
/**
 * Hopscotch relocation helper for hypre_UnorderedBigIntMap; identical
 * algorithm to hypre_UnorderedIntMapFindCloserFreeBucket but operating
 * on hypre_BigHopscotchBucket entries (big-int keys).
 *
 * On success *free_bucket points at a closer free bucket and *free_dist
 * is reduced accordingly; on failure *free_bucket = NULL, *free_dist = 0.
 */
static inline void
hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              hypre_BigHopscotchBucket **free_bucket,
                                              HYPRE_Int *free_dist)
{
   hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      /* Snapshot of the candidate's hop bitmap; re-validated under the
         segment lock before committing the move. */
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* First occupied slot of move_bucket's window that sits strictly
         before the current free slot. */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         /* Segment index is derived from the bucket's offset in the table;
            lock it unless the caller already holds it (start_seg). */
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
            omp_set_lock(&move_segment->lock);
#endif
         /* Abandon this candidate if another thread changed the window
            since the snapshot. */
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            /* Bump the segment timestamp (and flush) so lock-free readers
               can detect the move. */
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            /* Publish the copy first, then retire the old slot. */
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);
            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if(start_seg != move_segment)
               omp_unset_lock(&move_segment->lock);
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if(start_seg != move_segment)
            omp_unset_lock(&move_segment->lock);
#endif
      }
      ++move_bucket;
   }
   /* No relocatable candidate found. */
   *free_bucket = NULL;
   *free_dist = 0;
}
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s );
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s );
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m );
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m );
// Query Operations .........................................................
/*
 * Return 1 if key is in the set, 0 otherwise.
 * Lock-free read: scan the home bucket's hop bitmap; if the segment
 * timestamp changed during the scan (a concurrent displacement), fall back
 * to a linear scan of the whole hop range.
 */
static inline HYPRE_Int
hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s,
                               HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
#endif
   HYPRE_Int bucket = hash & s->bucketMask;
   hypre_uint hopInfo = s->hopInfo[bucket];
   if (0 == hopInfo)
      return 0;               /* empty neighborhood: definitely absent */
   else if (1 == hopInfo )
   {
      /* Only the home slot is occupied: a single direct comparison suffices. */
      if (hash == s->hash[bucket] && key == s->key[bucket])
         return 1;
      else return 0;
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Remember the timestamp so a concurrent displacement can be detected. */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
         return 1;
      hopInfo &= ~(1U << i);  /* clear the bit just examined */
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Unchanged timestamp means no displacement raced with us: trust the scan. */
   if (segment->timestamp == startTimestamp)
      return 0;
#endif
   /* Conservative fallback: scan the entire hop range. */
   HYPRE_Int i;
   for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
         return 1;
   }
   return 0;
}
/*
 * Return 1 if key is in the big-int set, 0 otherwise.
 * Same lock-free protocol as hypre_UnorderedIntSetContains: bitmap scan of
 * the home neighborhood, then a full hop-range rescan if the segment
 * timestamp changed (concurrent displacement).
 */
static inline HYPRE_Int
hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s,
                                  HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Mask first, then narrow: the masked value fits in HYPRE_Int. */
   hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)];
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask);
   hypre_uint hopInfo = s->hopInfo[bucket];
   if (0 == hopInfo)
      return 0;               /* empty neighborhood: definitely absent */
   else if (1 == hopInfo )
   {
      /* Only the home slot is occupied. */
      if (hash == s->hash[bucket] && key == s->key[bucket])
         return 1;
      else return 0;
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if (hash == s->hash[currElm] && key == s->key[currElm])
         return 1;
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* No concurrent displacement observed: the bitmap scan was authoritative. */
   if (segment->timestamp == startTimestamp)
      return 0;
#endif
   /* Conservative fallback: scan the entire hop range. */
   HYPRE_Int i;
   for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
   {
      if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
         return 1;
   }
   return 0;
}
/**
* @ret -1 if key doesn't exist
*/
/**
 * Look up key in the map and return its stored data.
 * Lock-free read with timestamp-validated fallback, like the Contains
 * functions above.
 * @ret -1 if key doesn't exist
 */
static inline HYPRE_Int
hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m,
                          HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
#endif
   hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]);
   hypre_uint hopInfo = elmAry->hopInfo;
   if (0 == hopInfo)
      return -1;              /* empty neighborhood: key absent */
   else if (1 == hopInfo )
   {
      /* Only the home slot is occupied. */
      if (hash == elmAry->hash && key == elmAry->key)
         return elmAry->data;
      else return -1;
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Snapshot the timestamp to detect concurrent displacements. */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = elmAry + i;
      if (hash == currElm->hash && key == currElm->key)
         return currElm->data;
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (segment->timestamp == startTimestamp)
      return -1;
#endif
   /* Timestamp changed: rescan the whole hop range from the home bucket. */
   hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
   HYPRE_Int i;
   for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
         return currBucket->data;
   }
   return -1;
}
/**
 * Look up key in the big-int map and return its stored data.
 * Lock-free read with timestamp-validated fallback, mirroring
 * hypre_UnorderedIntMapGet.
 * @ret -1 if key doesn't exist
 */
static inline
HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m,
                                       HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //CHECK IF ALREADY CONTAIN ................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)];
#endif
   hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   hypre_uint hopInfo = elmAry->hopInfo;
   if (0 == hopInfo)
      return -1;              /* empty neighborhood: key absent */
   else if (1 == hopInfo )
   {
      /* Only the home slot is occupied. */
      if (hash == elmAry->hash && key == elmAry->key)
         return elmAry->data;
      else return -1;
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Snapshot the timestamp to detect concurrent displacements. */
   HYPRE_Int startTimestamp = segment->timestamp;
#endif
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = elmAry + i;
      if (hash == currElm->hash && key == currElm->key)
         return currElm->data;
      hopInfo &= ~(1U << i);
   }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (segment->timestamp == startTimestamp)
      return -1;
#endif
   /* Timestamp changed: rescan the whole hop range from the home bucket.
      Fix: the index is explicitly narrowed with (HYPRE_Int), matching the
      home-bucket computation above (it previously used the raw HYPRE_BigInt
      masked expression, an inconsistent implicit conversion). */
   hypre_BigHopscotchBucket *currBucket = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   HYPRE_Int i;
   for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
   {
      if (hash == currBucket->hash && key == currBucket->key)
         return currBucket->data;
   }
   return -1;
}
//status Operations .........................................................
/* Count the occupied slots of the set by a linear scan over the whole
   table (bucketMask + INSERT_RANGE entries).  Not synchronized against
   concurrent insertions. */
static inline
HYPRE_Int hypre_UnorderedIntSetSize( hypre_UnorderedIntSet *s )
{
   HYPRE_Int num_buckets = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int num_elements = 0;
   HYPRE_Int bucket;
   for (bucket = 0; bucket < num_buckets; bucket++)
   {
      num_elements += (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[bucket]);
   }
   return num_elements;
}
/* Count the occupied slots of the big-int set by a linear scan over the
   whole table.  Not synchronized against concurrent insertions.
   Fix: the loop counter is HYPRE_BigInt, matching the type of the bound n
   (it was HYPRE_Int, giving a mixed-width signed comparison and a potential
   overflow if the table size exceeds HYPRE_Int range). */
static inline
HYPRE_Int hypre_UnorderedBigIntSetSize( hypre_UnorderedBigIntSet *s )
{
   HYPRE_Int counter = 0;
   HYPRE_BigInt n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_BigInt i;
   for (i = 0; i < n; ++i)
   {
      if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i])
      {
         ++counter;
      }
   }
   return counter;
}
/* Count the occupied buckets of the map by a linear scan over the whole
   table (bucketMask + INSERT_RANGE entries).  Not synchronized against
   concurrent insertions. */
static inline HYPRE_Int
hypre_UnorderedIntMapSize( hypre_UnorderedIntMap *m )
{
   HYPRE_Int num_buckets = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int num_elements = 0;
   HYPRE_Int bucket;
   for (bucket = 0; bucket < num_buckets; bucket++)
   {
      num_elements += (HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[bucket].hash);
   }
   return num_elements;
}
/* Count the occupied buckets of the big-int map by a linear scan over the
   whole table.  Not synchronized against concurrent insertions. */
static inline HYPRE_Int
hypre_UnorderedBigIntMapSize( hypre_UnorderedBigIntMap *m )
{
   HYPRE_Int num_buckets = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
   HYPRE_Int num_elements = 0;
   HYPRE_Int bucket;
   for (bucket = 0; bucket < num_buckets; bucket++)
   {
      num_elements += (HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[bucket].hash);
   }
   return num_elements;
}
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len );
HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len );
//modification Operations ...................................................
/*
 * Insert key into the set (no-op if already present).
 * Protocol: lock the segment owning the home bucket, check for the key,
 * claim an empty slot within INSERT_RANGE via compare-and-swap, then use
 * hopscotch displacement to bring that slot within HOP_RANGE of the home
 * bucket.  If no slot can be brought close enough the table would need a
 * resize, which is not implemented: the program aborts.
 */
static inline void
hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s,
                          HYPRE_Int key )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   HYPRE_Int bucket = hash&s->bucketMask;
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = s->hopInfo[bucket];
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if(hash == s->hash[currElm] && key == s->key[currElm])
      {
         /* Key already present: nothing to do. */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   HYPRE_Int free_bucket = bucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      /* Cheap unsynchronized check first, then CAS to atomically claim the
         slot (EMPTY -> BUSY) against inserters in other segments. */
      if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
          (HYPRE_HOPSCOTCH_HASH_EMPTY ==
           hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
         break;
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            /* Free slot is within the home neighborhood: publish the entry. */
            s->key[free_bucket] = key;
            s->hash[free_bucket] = hash;
            s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return;
         }
         /* Too far: displace a nearer entry into the free slot and retry. */
         hypre_UnorderedIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                   segment,
#endif
                                                   &free_bucket, &free_dist);
      } while (-1 != free_bucket);
   }
   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return;
}
/*
 * Insert key into the big-int set (no-op if already present).
 * Same protocol as hypre_UnorderedIntSetPut: lock the home segment, check
 * for the key, CAS-claim an empty slot within INSERT_RANGE, then hopscotch
 * displacement until the slot is within HOP_RANGE.  Resize is not
 * implemented: the program aborts if no slot can be brought close enough.
 */
static inline void
hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s,
                             HYPRE_BigInt key )
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Fix: narrow the masked index with (HYPRE_Int), matching
      hypre_UnorderedBigIntSetContains (it previously indexed with the raw
      HYPRE_BigInt expression). */
   hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)];
   omp_set_lock(&segment->lock);
#endif
   HYPRE_Int bucket = (HYPRE_Int)(hash&s->bucketMask);
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = s->hopInfo[bucket];
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      HYPRE_Int currElm = bucket + i;
      if(hash == s->hash[currElm] && key == s->key[currElm])
      {
         /* Key already present: nothing to do. */
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   HYPRE_Int free_bucket = bucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      /* Cheap check first, then CAS to claim the slot (EMPTY -> BUSY).
         NOTE(review): the CAS operates on the hash as HYPRE_Int even though
         s->hash holds HYPRE_BigInt values here — confirm this is intended
         for mixed-int builds. */
      if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) &&
          (HYPRE_HOPSCOTCH_HASH_EMPTY ==
           hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket],
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
         break;
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            /* Free slot is within the home neighborhood: publish the entry. */
            s->key[free_bucket] = key;
            s->hash[free_bucket] = hash;
            s->hopInfo[bucket] |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return;
         }
         /* Too far: displace a nearer entry into the free slot and retry. */
         hypre_UnorderedBigIntSetFindCloserFreeBucket(s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                      segment,
#endif
                                                      &free_bucket, &free_dist);
      } while (-1 != free_bucket);
   }
   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return;
}
/*
 * Insert (key, data) if key is absent.
 * Returns the existing data if key was already present, otherwise inserts
 * and returns HYPRE_HOPSCOTCH_HASH_EMPTY.  Same locking/CAS/displacement
 * protocol as hypre_UnorderedIntSetPut.  Resize is not implemented: the
 * program aborts if the insertion cannot be completed.
 */
static inline HYPRE_Int
hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m,
                                  HYPRE_Int key, HYPRE_Int data )
{
   //CALCULATE HASH ..........................
#ifdef HYPRE_BIGINT
   HYPRE_Int hash = hypre_BigHash(key);
#else
   HYPRE_Int hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
   omp_set_lock(&segment->lock);
#endif
   hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = startBucket->hopInfo;
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_HopscotchBucket* currElm = startBucket + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         /* Key already present: return the existing data unchanged. */
         HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return rc;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   hypre_HopscotchBucket* free_bucket = startBucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      /* Cheap check first, then CAS to atomically claim the slot
         (EMPTY -> BUSY) against concurrent inserters. */
      if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
          (HYPRE_HOPSCOTCH_HASH_EMPTY ==
           hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
         break;
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            /* Free slot is within the home neighborhood: publish the entry. */
            free_bucket->data = data;
            free_bucket->key = key;
            free_bucket->hash = hash;
            startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return HYPRE_HOPSCOTCH_HASH_EMPTY;
         }
         /* Too far: displace a nearer entry into the free slot and retry. */
         hypre_UnorderedIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                   segment,
#endif
                                                   &free_bucket, &free_dist);
      } while (NULL != free_bucket);
   }
   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
/*
 * Insert (key, data) into the big-int map if key is absent.
 * Returns the existing data if key was already present, otherwise inserts
 * and returns HYPRE_HOPSCOTCH_HASH_EMPTY.  Same protocol as
 * hypre_UnorderedIntMapPutIfAbsent.  Resize is not implemented: the program
 * aborts if the insertion cannot be completed.
 */
static inline HYPRE_Int
hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m,
                                     HYPRE_BigInt key, HYPRE_Int data)
{
   //CALCULATE HASH ..........................
#if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT)
   HYPRE_BigInt hash = hypre_BigHash(key);
#else
   HYPRE_BigInt hash = hypre_Hash(key);
#endif
   //LOCK KEY HASH ENTERY ....................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Fix: narrow the masked indices with (HYPRE_Int), matching
      hypre_UnorderedBigIntMapGet (they previously used the raw HYPRE_BigInt
      masked expressions, an inconsistent implicit conversion). */
   hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)];
   omp_set_lock(&segment->lock);
#endif
   hypre_BigHopscotchBucket* startBucket = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]);
   //CHECK IF ALREADY CONTAIN ................
   hypre_uint hopInfo = startBucket->hopInfo;
   while (0 != hopInfo)
   {
      HYPRE_Int i = first_lsb_bit_indx(hopInfo);
      hypre_BigHopscotchBucket* currElm = startBucket + i;
      if (hash == currElm->hash && key == currElm->key)
      {
         /* Key already present: return the existing data unchanged. */
         HYPRE_Int rc = currElm->data;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         omp_unset_lock(&segment->lock);
#endif
         return rc;
      }
      hopInfo &= ~(1U << i);
   }
   //LOOK FOR FREE BUCKET ....................
   hypre_BigHopscotchBucket* free_bucket = startBucket;
   HYPRE_Int free_dist = 0;
   for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
   {
      /* Cheap check first, then CAS to claim the slot (EMPTY -> BUSY).
         NOTE(review): the CAS treats the HYPRE_BigInt hash field as a
         HYPRE_Int — confirm this is intended for mixed-int builds. */
      if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) &&
          (HYPRE_HOPSCOTCH_HASH_EMPTY ==
           hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash,
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY,
                                  (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
         break;
   }
   //PLACE THE NEW KEY .......................
   if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
   {
      do
      {
         if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
         {
            /* Free slot is within the home neighborhood: publish the entry. */
            free_bucket->data = data;
            free_bucket->key = key;
            free_bucket->hash = hash;
            startBucket->hopInfo |= 1U << free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            omp_unset_lock(&segment->lock);
#endif
            return HYPRE_HOPSCOTCH_HASH_EMPTY;
         }
         /* Too far: displace a nearer entry into the free slot and retry. */
         hypre_UnorderedBigIntMapFindCloserFreeBucket(m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                                      segment,
#endif
                                                      &free_bucket, &free_dist);
      } while (NULL != free_bucket);
   }
   //NEED TO RESIZE ..........................
   hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
   /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
   exit(1);
   return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // hypre_HOPSCOTCH_HASH_HEADER
|
simd-8.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
extern void abort ();
int a[32][32] __attribute__((aligned (32))) = { { 1 } };
struct S { int s; };
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)
/* Sum all elements of the global 32x32 array a with a collapsed SIMD loop,
   accumulating the same total three ways: a builtin '+' reduction on
   struct S, a user-declared 'foo' reduction on struct S, and a
   user-declared 'foo' reduction on int.  Aborts if the three totals
   disagree; otherwise returns the sum. */
__attribute__((noinline, noclone)) int
foo (void)
{
  int i, j, u = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
#pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u) collapse(2)
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; j++)
      {
	int x = a[i][j];
	s.s += x;
	t.s += x;
	u += x;
      }
  /* All three reductions accumulated the same values; any mismatch means a
     broken user-defined-reduction implementation. */
  if (t.s != s.s || u != s.s)
    abort ();
  return s.s;
}
/* Fill the matrix so element (row, col) holds col + row/4, then check that
   foo() computes the expected grand total. */
int
main ()
{
  int row, col;
  for (row = 0; row < 32; row++)
    for (col = 0; col < 32; col++)
      a[row][col] = col + (row / 4);
  int total = foo ();
  if (total != 19456)
    abort ();
  return 0;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/* Exhaustive driver for 'omp for' clause handling on an offload target.
   Each section redefines FOR_CLAUSES and re-includes defines.h, then runs
   the PARALLEL/TEST/TESTD harness macros (defined in defines.h and the
   utilities headers — not visible here) over thread counts 0..max_threads,
   verifying the accumulated sums with VERIFY.
   NOTE(review): PARALLEL takes (init, loop-body, finalize, verify) macro
   arguments — confirm against defines.h. */
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
double S[N];
double p[2];
INIT();
// Detect whether target regions fall back to host execution; some GPU-only
// sections below are skipped (DUMP_SUCCESS) when running on the host.
long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
int max_threads = 224;
//
// Test: omp for with no extra clauses.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
{
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
},
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i]; \
B[i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: private clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES private(p,q)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
double p = 2; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[i] += p; \
B[i] += q; \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
}
//
// Test: firstprivate clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
double p = -4; \
double q = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p; \
B[i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: lastprivate clause on omp for.
//
double q0[1], q1[1], q2[1], q3[1], q4[1], q5[1], q6[1], q7[1], q8[1], q9[1];
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TEST({
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
_Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])")
{
_Pragma("omp for lastprivate(q0)")
for (int i = 0; i < N; i++) {
q0[0] = C[i] + D[i];
A[i] += q0[0];
}
_Pragma("omp for schedule(auto) lastprivate(q1)")
for (int i = 0; i < N; i++) {
q1[0] = D[i] + E[i];
B[i] += q1[0];
}
_Pragma("omp for schedule(dynamic) lastprivate(q2)")
for (int i = 0; i < N; i++) {
q2[0] = C[i] + D[i];
A[i] += q2[0];
}
_Pragma("omp for schedule(guided) lastprivate(q3)")
for (int i = 0; i < N; i++) {
q3[0] = D[i] + E[i];
B[i] += q3[0];
}
_Pragma("omp for schedule(runtime) lastprivate(q4)")
for (int i = 0; i < N; i++) {
q4[0] = C[i] + D[i];
A[i] += q4[0];
}
_Pragma("omp for schedule(static) lastprivate(q5)")
for (int i = 0; i < N; i++) {
q5[0] = D[i] + E[i];
B[i] += q5[0];
}
_Pragma("omp for schedule(static,1) lastprivate(q6)")
for (int i = 0; i < N; i++) {
q6[0] = C[i] + D[i];
A[i] += q6[0];
}
_Pragma("omp for schedule(static,9) lastprivate(q7)")
for (int i = 0; i < N; i++) {
q7[0] = D[i] + E[i];
B[i] += q7[0];
}
_Pragma("omp for schedule(static,13) lastprivate(q8)")
for (int i = 0; i < N; i++) {
q8[0] = C[i] + D[i];
A[i] += q8[0];
}
_Pragma("omp for schedule(static,30000) lastprivate(q9)")
for (int i = 0; i < N; i++) {
q9[0] = D[i] + E[i];
B[i] += q9[0];
}
}
double tmp = q0[0] + q1[0] + q2[0] + q3[0] + q4[0] + \
q5[0] + q6[0] + q7[0] + q8[0] + q9[0];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
}, VERIFY(0, 1, S[0], 5 * (N + (N/2*(N+1))) ));
}
//
// Test: private clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES private(p)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
p[0] = 2; p[1] = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
p[1] = D[i] + E[i]; \
A[i] += p[0]; \
B[i] += p[1]; \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
}
//
// Test: firstprivate clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES firstprivate(p)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
p[0] = -4; p[1] = 4; \
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[i] += C[i] + D[i] + p[0]; \
B[i] += D[i] + E[i] + p[1]; \
if (i == N-1) { \
p[0] += 6; \
p[1] += 9; \
} \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: collapse clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES collapse(2)
#include "defines.h"
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
PARALLEL(
S[0] = 0; \
for (int i = 0; i < N; i++) { \
A[i] = B[i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[i*3+j] += C[i*3+j] + D[i*3+j]; \
B[i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[i] + B[i];
}
S[0] += tmp;
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: ordered clause on omp for.
//
#undef FOR_CLAUSES
#define FOR_CLAUSES ordered
#include "defines.h"
// Only t = 0 and t = max_threads are exercised (t += max_threads): ordered
// serializes the loop, so intermediate thread counts add little coverage.
for (int t = 0; t <= max_threads; t += max_threads) {
int threads[1]; threads[0] = t;
PARALLEL(
S[0] = 0; \
,
for (int i = 0; i < N; i++) { \
_Pragma("omp ordered") \
S[0] += C[i] + D[i]; \
}
,
{
},
VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
}
//
// Test: nowait clause on omp for.
// FIXME: Not sure how to test for correctness.
//
for (int t = 0; t <= max_threads; t++) {
int threads[1]; threads[0] = t;
TEST({
S[0] = 0;
for (int i = 0; i < N; i++) {
A[i] = B[i] = 0;
}
_Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])")
{
_Pragma("omp for nowait schedule(static,1)")
for (int i = 0; i < N; i++) {
A[i] = C[i] + D[i];
}
_Pragma("omp for nowait schedule(static,1)")
for (int i = 0; i < N; i++) {
B[i] = A[i] + D[i] + E[i];
}
_Pragma("omp barrier")
if (omp_get_thread_num() == 0) {
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += B[i];
}
S[0] += tmp;
}
}
}, VERIFY(0, 1, S[0], (N/2*(N+1)) ));
}
//
// Test: Ensure coalesced scheduling on GPU.
//
if (!cpuExec) {
int nthreads = 0;
// if the size of the iteration space does not
// exactly divide by the number of threads then
// there will be a residual number of values that
// need to be handled. The sum of these values is
// the sum of the first n natural numbers.
int residual;
#pragma omp target map(tofrom: nthreads)
#pragma omp teams num_teams(1) thread_limit(33)
{
int s = omp_get_team_num();
#pragma omp parallel num_threads(33)
for (int i = 0; i < 99; i++) {
if (i == 0) {
nthreads = omp_get_num_threads() + s - omp_get_team_num();
}
}
}
residual = 99 - nthreads * (99 / nthreads);
TESTD("omp target teams num_teams(1) thread_limit(33)", {
S[0] = 0;
for (int i = 0; i < 99; i++) {
A[i] = 0;
}
_Pragma("omp parallel num_threads(33)")
{
_Pragma("omp for")
for (int i = 0; i < 99; i++) {
A[i] += i - omp_get_thread_num();
}
_Pragma("omp for schedule(auto)")
for (int i = 0; i < 99; i++) {
A[i] += i - omp_get_thread_num();
}
_Pragma("omp for schedule(static,1)")
for (int i = 0; i < 99; i++) {
A[i] += i - omp_get_thread_num();
}
}
double tmp = 0;
for (int i = 0; i < 99; i++) {
tmp += A[i];
}
S[0] = tmp;
}, VERIFY(0, 1, S[0], 3 * ( 99 * 98 * 0.5 - 3 * 0.5 * nthreads * (nthreads - 1) - 0.5 * residual * (residual - 1)) ));
} else {
DUMP_SUCCESS(1);
}
//
// Test: Ensure that we have barriers after dynamic, guided,
// and ordered schedules, even with a nowait clause since the
// NVPTX runtime doesn't currently support concurrent execution
// of these constructs.
// FIXME: Not sure how to test for correctness at runtime.
//
if (!cpuExec) {
TEST({
for (int i = 0; i < N; i++) {
A[i] = 0;
}
_Pragma("omp parallel")
{
_Pragma("omp for nowait schedule(guided)")
for (int i = 0; i < N; i++) {
A[i] += C[i] + D[i];
}
_Pragma("omp for nowait schedule(dynamic)")
for (int i = 0; i < N; i++) {
A[i] += D[i] + E[i];
}
_Pragma("omp for nowait ordered")
for (int i = 0; i < N; i++) {
A[i] += C[i] + D[i];
}
}
}, VERIFY(0, N, A[i], 2*i+2) );
} else {
DUMP_SUCCESS(1);
}
//
// Test: Linear clause on target
//
if (!cpuExec) {
int l = 0;
ZERO(A);
#pragma omp target map(tofrom:A)
#pragma omp parallel for linear(l:2)
for(int i = 0 ; i < 10 ; i++)
A[i] = l;
int fail = 0;
for(int i = 0 ; i < 10 ; i++)
if(A[i] != i*2) {
printf("error at %d, val = %lf expected = %d\n", i, A[i], i*2);
fail = 1;
}
if(fail)
printf("Error\n");
else
printf("Succeeded\n");
} else {
DUMP_SUCCESS(1);
}
return 0;
}
|
polynomialSurface.h | #ifndef batoid_PolynomialSurface_h
#define batoid_PolynomialSurface_h
#include "surface.h"
namespace batoid {
#if defined(BATOID_GPU)
#pragma omp declare target
#endif
/**
 * Surface defined by a 2-D polynomial sag z(x, y).
 *
 * Coefficient arrays are stored as raw pointers (row-major, xsize * ysize
 * entries each, evaluated via horner2d); the gradient coefficient arrays
 * supply the analytic partial derivatives used by normal().
 * NOTE(review): ownership of the coefficient buffers is not visible here —
 * presumably the caller keeps them alive; confirm against the .cpp.
 */
class PolynomialSurface : public Surface {
public:
    PolynomialSurface(
        const double* coefs, const double* coefs_gradx, const double* coefs_grady,
        size_t xsize, size_t ysize
    );
    ~PolynomialSurface();

    // Device-side copy of this surface (GPU builds); see getDevPtr in base.
    virtual const Surface* getDevPtr() const override;
    // Surface height z at (x, y).
    virtual double sag(double, double) const override;
    // Unit surface normal at (x, y), written to nx/ny/nz.
    virtual void normal(
        double x, double y,
        double& nx, double& ny, double& nz
    ) const override;

private:
    const double* _coefs;        // sag polynomial coefficients
    const double* _coefs_gradx;  // d(sag)/dx polynomial coefficients
    const double* _coefs_grady;  // d(sag)/dy polynomial coefficients
    const size_t _xsize, _ysize; // coefficient grid dimensions
};
double horner2d(double x, double y, const double* coefs, size_t nx, size_t ny);
#if defined(BATOID_GPU)
#pragma omp end declare target
#endif
}
#endif // batoid_PolynomialSurface_h
|
GB_dense_subassign_23_template.c | //------------------------------------------------------------------------------
// GB_dense_subassign_23_template: C += B where C is dense; B is sparse or dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All entries in C+=B are computed entirely in parallel, using the same kind of
// parallelism as Template/GB_AxB_colscale.c.
#include "GB_unused.h"
{
    // Template body: expanded per type/operator via the GB_* macros.
    // Variables nthreads, ntasks, kfirst_slice, klast_slice, pstart_slice
    // come from the including scope (see Template/GB_AxB_colscale.c).

    //--------------------------------------------------------------------------
    // get C and B
    //--------------------------------------------------------------------------
    const GB_BTYPE *GB_RESTRICT Bx = (GB_BTYPE *) B->x ;
    GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ;
    ASSERT (GB_is_dense (C)) ;
    const int64_t cnz = GB_NNZ_HELD (C) ;
    if (GB_IS_BITMAP (B))
    {
        //----------------------------------------------------------------------
        // C += B when C is dense and B is bitmap
        //----------------------------------------------------------------------
        const int8_t *GB_RESTRICT Bb = B->b ;
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            if (!Bb [p]) continue ;                      // skip absent entries
            GB_GETB (bij, Bx, p) ;                       // bij = B(i,j)
            GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ; // C(i,j) += bij
        }
    }
    else if (kfirst_slice == NULL)
    {
        //----------------------------------------------------------------------
        // C += B when both C and B are dense
        //----------------------------------------------------------------------
        ASSERT (GB_is_dense (B)) ;
        #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL
        // C += B via GB_cblas_daxpy or GB_cblas_saxpy
        GB_CBLAS_AXPY                       // Y += alpha*X
        (
            cnz,                            // length of X and Y (note: int64_t)
            (GB_CTYPE) 1,                   // alpha is 1.0
            Bx,                             // X, always stride 1
            Cx,                             // Y, always stride 1
            nthreads                        // maximum # of threads to use
        ) ;
        #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL
        // C -= B via GB_cblas_daxpy or GB_cblas_saxpy
        GB_CBLAS_AXPY                       // Y += alpha*X
        (
            cnz,                            // length of X and Y (note: int64_t)
            (GB_CTYPE) -1,                  // alpha is -1.0
            Bx,                             // X, always stride 1
            Cx,                             // Y, always stride 1
            nthreads                        // maximum # of threads to use
        ) ;
        #else
        // generic element-wise fallback
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETB (bij, Bx, p) ;                       // bij = B(i,j)
            GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ; // C(i,j) += bij
        }
        #endif
    }
    else
    {
        //----------------------------------------------------------------------
        // C += B when C is dense and B is sparse
        //----------------------------------------------------------------------
        ASSERT (GB_JUMBLED_OK (B)) ;
        const int64_t *GB_RESTRICT Bp = B->p ;
        const int64_t *GB_RESTRICT Bh = B->h ;
        const int64_t *GB_RESTRICT Bi = B->i ;
        const int64_t bvlen = B->vlen ;
        const int64_t cvlen = C->vlen ;
        bool B_jumbled = B->jumbled ;
        int taskid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {
            // if kfirst > klast then taskid does no work at all
            int64_t kfirst = kfirst_slice [taskid] ;
            int64_t klast  = klast_slice  [taskid] ;
            //------------------------------------------------------------------
            // C(:,kfirst:klast) += B(:,kfirst:klast)
            //------------------------------------------------------------------
            for (int64_t k = kfirst ; k <= klast ; k++)
            {
                //--------------------------------------------------------------
                // find the part of B(:,k) and C(:,k) for this task
                //--------------------------------------------------------------
                int64_t j = GBH (Bh, k) ;
                int64_t my_pB_start, my_pB_end ;
                GB_get_pA (&my_pB_start, &my_pB_end, taskid, k,
                    kfirst, klast, pstart_slice, Bp, bvlen) ;
                int64_t pB_start = GBP (Bp, k, bvlen) ;
                int64_t pB_end   = GBP (Bp, k+1, bvlen) ;
                // B(:,j) is "dense" if it holds cvlen entries
                bool bjdense = ((pB_end - pB_start) == cvlen) ;
                // pC points to the start of C(:,j) if C is dense
                int64_t pC = j * cvlen ;
                //--------------------------------------------------------------
                // C(:,j) += B(:,j)
                //--------------------------------------------------------------
                if (bjdense && !B_jumbled)
                {
                    //----------------------------------------------------------
                    // both C(:,j) and B(:,j) are dense
                    //----------------------------------------------------------
                    #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL
                    // y += x via GB_cblas_daxpy or GB_cblas_saxpy.
                    // use a single thread since this is already in a
                    // parallel region.
                    int64_t len = my_pB_end - my_pB_start ;
                    int64_t i = my_pB_start - pB_start ;
                    int64_t p = pC + i ;
                    GB_CBLAS_AXPY           // Y += alpha*X
                    (
                        len,                // length of X and Y
                        (GB_CTYPE) 1,       // alpha is 1.0
                        Bx + my_pB_start,   // X, always stride 1
                        Cx + p,             // Y, always stride 1
                        1                   // use a single thread
                    ) ;
                    #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL
                    // y -= x via GB_cblas_daxpy or GB_cblas_saxpy.
                    // use a single thread since this is already in a
                    // parallel region.
                    int64_t len = my_pB_end - my_pB_start ;
                    int64_t i = my_pB_start - pB_start ;
                    int64_t p = pC + i ;
                    GB_CBLAS_AXPY           // Y += alpha*X
                    (
                        len,                // length of X and Y
                        (GB_CTYPE) -1,      // alpha is -1.0
                        Bx + my_pB_start,   // X, always stride 1
                        Cx + p,             // Y, always stride 1
                        1                   // use a single thread
                    ) ;
                    #else
                    GB_PRAGMA_SIMD_VECTORIZE
                    for (int64_t pB = my_pB_start ; pB < my_pB_end ; pB++)
                    {
                        // row index follows from the position, B(:,j) dense
                        int64_t i = pB - pB_start ;
                        int64_t p = pC + i ;
                        // bij = B(i,j)
                        GB_GETB (bij, Bx, pB) ;
                        // C(i,j) += bij
                        GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ;
                    }
                    #endif
                }
                else
                {
                    //----------------------------------------------------------
                    // C(:,j) is dense; B(:,j) is sparse
                    //----------------------------------------------------------
                    GB_PRAGMA_SIMD_VECTORIZE
                    for (int64_t pB = my_pB_start ; pB < my_pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;            // explicit row index
                        int64_t p = pC + i ;
                        GB_GETB (bij, Bx, pB) ;          // bij = B(i,j)
                        // C(i,j) += bij
                        GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ;
                    }
                }
            }
        }
    }
}
|
cubemap.h | #pragma once
#include <string>
#include "locator.h"
#include "device.h"
#include "command.h"
#include "create.h"
#include "image.h"
class CubeMap : public Image {
public:
    // Builds a cubemap texture from the six face images found in
    // <filename>/{posx,negx,posy,negy,posz,negz}.jpg, then creates the
    // corresponding image view and sampler.
    CubeMap(std::string filename) {
        createImage(filename);
        createImageView();
        createSampler();
    }

    ~CubeMap() {
        hw::loc::device()->destroy(textureSampler);
        hw::loc::device()->destroy(textureImageView);
        hw::loc::device()->destroy(textureImage);
        hw::loc::device()->free(textureImageMemory);
    }

    // View over the cube image; reference is valid for this object's lifetime.
    VkImageView& view() {
        return textureImageView;
    }

    // Sampler for the cube image; reference is valid for this object's lifetime.
    VkSampler& sampler() {
        return textureSampler;
    }

private:
    VkImage textureImage;
    VkDeviceMemory textureImageMemory;
    VkImageView textureImageView;
    VkSampler textureSampler;

    // Loads the six faces, uploads them through a host-visible staging buffer,
    // and leaves the image in SHADER_READ_ONLY_OPTIMAL layout.
    void createImage(std::string filename) {
        static const char* const kFaces[6] = {
            "/posx.jpg", "/negx.jpg", "/posy.jpg",
            "/negy.jpg", "/posz.jpg", "/negz.jpg"};
        stbi_uc* pixels[6];
        int texWidth = 0, texHeight = 0, texChannels = 0;
        for (int i = 0; i < 6; i++)
            pixels[i] = stbi_load((filename + kFaces[i]).data(), &texWidth, &texHeight, &texChannels, STBI_rgb_alpha);

        // BUGFIX: the original threw from inside '#pragma omp parallel for'.
        // An exception escaping an OpenMP structured block is undefined
        // behavior (in practice std::terminate).  Check sequentially instead
        // (six iterations gain nothing from threads) and free any faces that
        // did load before propagating the error, so nothing leaks.
        for (int i = 0; i < 6; i++) {
            if (!pixels[i]) {
                for (int j = 0; j < 6; j++)
                    if (pixels[j])
                        stbi_image_free(pixels[j]);
                throw std::runtime_error("failed to load cubemap image!");
            }
        }

        // BUGFIX: widen before multiplying; texWidth*texHeight*4 was computed
        // in 'int' and could overflow for large faces.
        VkDeviceSize imageSize = static_cast<VkDeviceSize>(texWidth) * static_cast<VkDeviceSize>(texHeight) * 4;
        VkDeviceSize cubeMapSize = imageSize * 6;

        VkBuffer stagingBuffer;
        VkDeviceMemory stagingBufferMemory;
        create::buffer(cubeMapSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, stagingBuffer, stagingBufferMemory);

        // Copy each face into its layer slot in the staging buffer.
        void* data;
        hw::loc::device()->map(stagingBufferMemory, cubeMapSize, data);
        for (int i = 0; i < 6; i++)
            memcpy(static_cast<char*>(data) + static_cast<size_t>(imageSize) * i, pixels[i], static_cast<size_t>(imageSize));
        hw::loc::device()->unmap(stagingBufferMemory);

        for (int i = 0; i < 6; i++)
            stbi_image_free(pixels[i]);

        create::cubemap(texWidth, texHeight, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, textureImage, textureImageMemory);
        hw::loc::cmd()->transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 6);
        hw::loc::cmd()->copyBufferToImage(stagingBuffer, textureImage, static_cast<uint32_t>(texWidth), static_cast<uint32_t>(texHeight), 6);
        hw::loc::cmd()->transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 6);
        hw::loc::device()->destroy(stagingBuffer);
        hw::loc::device()->free(stagingBufferMemory);
    }

    void createImageView() {
        textureImageView = create::imageView(textureImage, VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_ASPECT_COLOR_BIT, 6, VK_IMAGE_VIEW_TYPE_CUBE);
    }

    void createSampler() {
        create::sampler(textureSampler, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE);
    }
};
|
GB_unaryop__minv_uint8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint64
// op(A') function: GB_tran__minv_uint8_uint64
// C type: uint8_t
// A type: uint64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__minv_uint8_uint64: Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8)
// for all p in 0..anz-1 (see the GB_CAST_OP macro above).  Cx and Ax must not
// alias (both are 'restrict').  Returns GrB_NO_VALUE when this operator/type
// combination is disabled at compile time (GB_DISABLE), so the caller can
// fall back to the generic worker.
GrB_Info GB_unop__minv_uint8_uint64
(
uint8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// each entry is independent, so a static schedule divides the work evenly
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p]))
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__minv_uint8_uint64: C = op (cast (A')): transpose A, typecast
// uint64_t to uint8_t, and apply GB_IMINV_UNSIGNED.  The actual work is done
// by the GB_unaryop_transpose.c template included below, specialized through
// the GB_* macros defined earlier in this file (phase 2 of 2 fills in C).
// Returns GrB_NO_VALUE when disabled at compile time via GB_DISABLE.
GrB_Info GB_tran__minv_uint8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
// template expands using the macros and the parameters above
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
metadirective_device_kind_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c -triple aarch64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c -triple ppc64le-unknown-linux -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void bar(void);
// foo(): each metadirective below is resolved at compile time from the
// device={kind(...)} selector.  For these host compilations (see the RUN
// triples), kinds 'any', 'host' and 'cpu' match while 'nohost' and 'gpu'
// do not, so the matching variant (or the default) is emitted; the CHECK
// lines below pin the resulting outlined functions.
void foo(void) {
// 'any' matches: emitted as 'parallel' (OUTLINED_1).
#pragma omp metadirective when(device = {kind(any)} \
: parallel)
bar();
// 'host, cpu' matches: 'parallel for num_threads(4)' (OUTLINED_2).
#pragma omp metadirective when(device = {kind(host, cpu)} \
: parallel for num_threads(4))
for (int i = 0; i < 100; i++)
;
// 'host' matches: 'parallel for' (OUTLINED_3).
#pragma omp metadirective when(device = {kind(host)} \
: parallel for)
for (int i = 0; i < 100; i++)
;
// 'nohost, gpu' does not match; the second when() clause ('cpu') does,
// so this becomes 'parallel' (OUTLINED_4).
#pragma omp metadirective when(device = {kind(nohost, gpu)} \
:) when(device = {kind(cpu)} \
: parallel)
bar();
// 'any, cpu' matches: 'parallel' (OUTLINED_5).
#pragma omp metadirective when(device = {kind(any, cpu)} \
: parallel)
bar();
// 'any, host' matches: 'parallel' (OUTLINED_6).
#pragma omp metadirective when(device = {kind(any, host)} \
: parallel)
bar();
// 'gpu' does not match: the default 'parallel for' is used (OUTLINED_7).
#pragma omp metadirective when(device = {kind(gpu)} \
: target parallel for) default(parallel for)
for (int i = 0; i < 100; i++)
;
}
// CHECK-LABEL: define {{.+}} void @foo()
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_1:@.+]] to void
// CHECK-NEXT: @__kmpc_push_num_threads
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_2:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_3:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_4:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_5:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_6:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_7:@.+]] to void
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_1]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_2]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_3]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_4]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_5]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_6]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_7]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
#endif
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree. Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except at the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
/*
  A pixel color held as doubles: red, green, blue and alpha components.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
/*
  One node of the color-description tree.  Up to 16 children: 3 color axes
  give 8 octants, doubled when alpha is associated (bit 3 of the child id,
  see ColorToNodeId()).
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,    /* parent node in the tree */
*child[16];    /* children, indexed by ColorToNodeId() */
MagickSizeType
number_unique;    /* n2: # of pixels classified exactly at this node */
DoublePixelPacket
total_color;    /* Sr, Sg, Sb (and Sa): component sums of those pixels */
double
quantize_error;    /* E: summed squared distance of pixels to node center */
size_t
color_number,    /* colormap index assigned to this node's mean color */
id,    /* child slot this node occupies in its parent (0..15) */
level;    /* depth of this node in the tree */
} NodeInfo;
/*
  A slab of NodeInfo structures; slabs are chained so they can be released
  in bulk when the cube is destroyed.
*/
typedef struct _Nodes
{
NodeInfo
*nodes;    /* array of nodes in this slab */
struct _Nodes
*next;    /* next slab in the chain */
} Nodes;
/*
  All state for one quantization run: the color tree, pruning thresholds,
  closest-color search scratch, and dithering state.
*/
typedef struct _CubeInfo
{
NodeInfo
*root;    /* root of the color-description tree */
size_t
colors,    /* current number of colors represented */
maximum_colors;    /* requested maximum number of output colors */
ssize_t
transparent_index;    /* colormap index of transparent color, -1 if none */
MagickSizeType
transparent_pixels;    /* count of fully transparent pixels */
DoublePixelPacket
target;    /* color being searched for by ClosestColor() */
double
distance,    /* best squared distance found so far by ClosestColor() */
pruning_threshold,    /* Ep: nodes with E <= Ep are pruned this pass */
next_threshold;    /* smallest E above Ep; becomes next pass's Ep */
size_t
nodes,    /* total nodes allocated (compared against MaxNodes) */
free_nodes,    /* unused nodes remaining in the current slab */
color_number;    /* colormap index of the closest color found */
NodeInfo
*next_node;    /* next unused node in the current slab */
Nodes
*node_queue;    /* chain of node slabs, for bulk destruction */
MemoryInfo
*memory_info;    /* backing allocation for 'cache' -- presumably; verify */
ssize_t
*cache;    /* NOTE(review): looks like a color->index dither cache; confirm */
DoublePixelPacket
error[ErrorQueueLength];    /* recent dither error terms (ring of 16) */
double
weights[ErrorQueueLength];    /* weights applied to the error queue */
QuantizeInfo
*quantize_info;    /* quantization settings (dither method, colorspace, ...) */
MagickBooleanType
associate_alpha;    /* premultiply alpha when classifying (SetAssociatedAlpha) */
ssize_t
x,
y;    /* current pixel position -- presumably used while dithering */
size_t
depth;    /* current maximum tree depth (reduced when tree is pruned) */
MagickOffsetType
offset;    /* progress-monitor offset -- presumably; verify against callers */
MagickSizeType
span;    /* progress-monitor span (total work) -- presumably */
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *),
SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate and default-initialize a QuantizeInfo; AcquireCriticalMemory()
    never returns NULL (it aborts on allocation failure).
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Seed dithering and error measurement from the image info; an explicit
    "dither" image option overrides the boolean dither flag.
  */
  quantize_info->dither_method=image_info->dither == MagickFalse ?
    NoDitherMethod : RiemersmaDitherMethod;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    Convert a raw pixel to doubles.  When the cube associates alpha and the
    pixel is not fully opaque, the color channels are premultiplied by the
    normalized alpha; the alpha channel itself is copied unscaled.
  */
  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    {
      double
        gamma;

      gamma=(double) (QuantumScale*GetPixelAlpha(image,pixel));
      alpha_pixel->red=gamma*GetPixelRed(image,pixel);
      alpha_pixel->green=gamma*GetPixelGreen(image,pixel);
      alpha_pixel->blue=gamma*GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha_pixel->red=(double) GetPixelRed(image,pixel);
  alpha_pixel->green=(double) GetPixelGreen(image,pixel);
  alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  /*
    PixelInfo variant of AssociateAlphaPixel(): copy the components to
    doubles, premultiplying the color channels by the normalized alpha when
    alpha is associated and the pixel is not fully opaque.
  */
  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    {
      double
        gamma;

      gamma=(double) (QuantumScale*pixel->alpha);
      alpha_pixel->red=gamma*pixel->red;
      alpha_pixel->green=gamma*pixel->green;
      alpha_pixel->blue=gamma*pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha_pixel->red=(double) pixel->red;
  alpha_pixel->green=(double) pixel->green;
  alpha_pixel->blue=(double) pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  /*
    Form the child index from bit 'index' of each 8-bit-scaled channel:
    red -> bit 0, green -> bit 1, blue -> bit 2, and, when alpha is
    associated, alpha -> bit 3.
  */
  id=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01) << 1);
  id|=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}
/*
AssignImageColors() maps each pixel of the image to its closest colormap
entry from the pruned color cube, optionally dithering, then restores the
caller's colorspace.  Returns MagickTrue on success, MagickFalse if the
colormap could not be set.
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
if (SetImageColormap(image,cube_info,exception) == MagickFalse)
return(MagickFalse);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/* each thread works on a private copy of the cube search state */
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
/* coalesce a run of identical pixels so the tree walk and
closest-color search are done once per run */
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
/* start from a distance larger than any possible squared distance */
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
/* write the chosen colormap entry over the whole run */
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
/* special case: force a 2-color grayscale colormap to pure black/white */
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if (image->colors > 1)
{
intensity=0.0;
if (GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1))
intensity=(double) QuantumRange;
}
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
/* restore the caller's colorspace if we transformed it above */
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace,exception);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If colors components in the input image are quantized to k-bit
% precision, so that Cmax= 2k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8k).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing It updates the following data for each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  /*
    Decide whether classification should premultiply (associate) alpha:
    only when the image blends alpha, and never when reducing to a
    2-color grayscale result.
  */
  cube_info->associate_alpha=image->alpha_trait == BlendPixelTrait ?
    MagickTrue : MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    cube_info->associate_alpha=MagickFalse;
}
/*
  ClassifyImageColors() builds the color description tree: every image pixel
  is classified down the (8-way, or 16-way with associated alpha) tree,
  accumulating per-node pixel counts, color sums, and an approximate
  quantization error.  The first pass classifies to the full MaxTreeDepth;
  once the color count exceeds cube_info->maximum_colors the tree is pruned
  and the remaining rows are classified only to the (reduced)
  cube_info->depth.  Returns MagickTrue when every row was processed,
  MagickFalse when a row could not be read or a progress monitor cancelled.

  Fix: the allocation-failure exception in the second pass used the format
  "%s" while the first pass used "`%s'"; both now use "`%s'".
*/
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  /*
    The root node's cube is centered at mid-range on every channel.
  */
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree; count collapses a
        run of identical adjacent pixels into one classification.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /*
          Each level halves the cube; the id bits select the sub-cube and
          move the node center (mid) toward the pixel.
        */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /*
          Too many colors: prune and switch to the reduced-depth pass below.
        */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: classify the remaining rows only to cube_info->depth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
*/
/*
  CloneQuantizeInfo() returns a freshly allocated QuantizeInfo.  When a
  source structure is supplied its quantization settings are copied into the
  clone; otherwise the clone carries the defaults from GetQuantizeInfo().
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  ClosestColor() recursively visits the color cube below node_info and, for
  every node that defines a colormap entry, updates cube_info->distance and
  cube_info->color_number when that entry is nearer to cube_info->target
  than the best match seen so far.
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique == 0)
    return;
  {
    double
      delta,
      distance;

    register double
      alpha,
      beta;

    register DoublePixelPacket
      *magick_restrict q;

    register PixelInfo
      *magick_restrict p;

    /*
      Accumulate the squared channel distance, bailing out as soon as it
      exceeds the best distance found so far.
    */
    p=image->colormap+node_info->color_number;
    q=(&cube_info->target);
    alpha=1.0;
    beta=1.0;
    if (cube_info->associate_alpha != MagickFalse)
      {
        alpha=(double) (QuantumScale*p->alpha);
        beta=(double) (QuantumScale*q->alpha);
      }
    delta=alpha*p->red-beta*q->red;
    distance=delta*delta;
    if (distance > cube_info->distance)
      return;
    delta=alpha*p->green-beta*q->green;
    distance+=delta*delta;
    if (distance > cube_info->distance)
      return;
    delta=alpha*p->blue-beta*q->blue;
    distance+=delta*delta;
    if (distance > cube_info->distance)
      return;
    if (cube_info->associate_alpha != MagickFalse)
      {
        delta=p->alpha-q->alpha;
        distance+=delta*delta;
      }
    if (distance > cube_info->distance)
      return;
    cube_info->distance=distance;
    cube_info->color_number=node_info->color_number;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CompressImageColormap() removes duplicate and unused colormap entries by
  re-quantizing the palette image to its current number of colors at full
  tree depth.  Returns MagickFalse when the image has no palette.
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize with the existing color count; QuantizeImage collapses
    duplicates and drops unreferenced entries.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.tree_depth=MaxTreeDepth;
  quantize_info.number_colors=image->colors;
  return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
/*
  DefineImageColormap() walks the color cube tree and appends one colormap
  entry (the mean color of the classified pixels) for every node whose
  unique-pixel count is non-zero, assigning each node its colormap index.
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register double
    norm;

  register PixelInfo
    *magick_restrict entry;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique == 0)
    return;
  /*
    Colormap entry is defined by the mean color in this cube; norm is the
    reciprocal of the pixel count.
  */
  entry=image->colormap+image->colors;
  norm=PerceptibleReciprocal((double) ((MagickOffsetType)
    node_info->number_unique));
  if (cube_info->associate_alpha == MagickFalse)
    {
      entry->red=(double) ClampToQuantum(norm*QuantumRange*
        node_info->total_color.red);
      entry->green=(double) ClampToQuantum(norm*QuantumRange*
        node_info->total_color.green);
      entry->blue=(double) ClampToQuantum(norm*QuantumRange*
        node_info->total_color.blue);
      entry->alpha=(double) OpaqueAlpha;
    }
  else
    {
      double
        opacity;

      opacity=(double) (norm*QuantumRange*node_info->total_color.alpha);
      entry->alpha=(double) ClampToQuantum(opacity);
      if (entry->alpha == OpaqueAlpha)
        {
          entry->red=(double) ClampToQuantum(norm*QuantumRange*
            node_info->total_color.red);
          entry->green=(double) ClampToQuantum(norm*QuantumRange*
            node_info->total_color.green);
          entry->blue=(double) ClampToQuantum(norm*QuantumRange*
            node_info->total_color.blue);
        }
      else
        {
          double
            gamma;

          /*
            Un-premultiply by the mean alpha before storing the color, and
            remember the most-populated transparent entry.
          */
          gamma=PerceptibleReciprocal((double) (QuantumScale*entry->alpha));
          entry->red=(double) ClampToQuantum(norm*gamma*QuantumRange*
            node_info->total_color.red);
          entry->green=(double) ClampToQuantum(norm*gamma*QuantumRange*
            node_info->total_color.green);
          entry->blue=(double) ClampToQuantum(norm*gamma*QuantumRange*
            node_info->total_color.blue);
          if (node_info->number_unique > cube_info->transparent_pixels)
            {
              cube_info->transparent_pixels=node_info->number_unique;
              cube_info->transparent_index=(ssize_t) image->colors;
            }
        }
    }
  node_info->color_number=image->colors++;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a color cube.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
/*
  DestroyCubeInfo() releases every segment of color-cube tree storage, the
  dither cache, the cloned quantize info, and finally the cube itself.
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *next;

  /*
    Release color cube tree storage, one queued segment at a time.
  */
  do
  {
    next=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
/*
  DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
  structure and returns NULL.

  Fix: the pointer asserts now run before the trace-log call, matching the
  assert-first convention used elsewhere in this file (e.g.
  CompressImageColormap) — validate the structure before acting on it.
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Invalidate the signature so stale pointers are caught by the asserts.
  */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyPixelThreadSet() frees each per-thread error buffer and then the
  table that held them; always returns NULL for convenient assignment.
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
  {
    if (pixels[i] == (DoublePixelPacket *) NULL)
      continue;
    pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  }
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/*
  AcquirePixelThreadSet() allocates one error buffer per worker thread, each
  holding two rows of `count` packets (current and previous scanline
  diffusion errors).  Returns NULL on allocation failure, releasing any
  buffers already acquired.
*/
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  /*
    NULL-fill first so a partial failure can be cleaned up safely.
  */
  for (i=0; i < (ssize_t) number_threads; i++)
    pixels[i]=(DoublePixelPacket *) NULL;
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/*
  CacheOffset() maps a pixel color to its slot in the closest-color cache.
  Each 8-bit channel is reduced to its top (8-CacheShift) bits and the
  channels are packed side by side: red in the lowest bit group, then
  green, blue, and — when alpha is associated — alpha.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  /* Alpha widens the key only when it participates in quantization. */
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
/*
  FloydSteinbergDither() assigns each pixel to its closest colormap entry
  while distributing the quantization error to unprocessed neighbors with
  the 7/16, 5/16, 3/16, 1/16 Floyd-Steinberg kernel, scanning rows in
  serpentine order.  The diffusion strength can be scaled through the
  "dither:diffusion-amount" image artifact.

  Fix: the function tracked failures in `status` (pixel-row acquisition,
  cache sync, progress cancellation) but unconditionally returned
  MagickTrue, silently discarding errors; it now returns `status`.
*/
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /*
      Alternate the two error rows and the scan direction on odd rows
      (serpentine scan): v is +1 left-to-right, -1 right-to-left.
    */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /*
        Gather diffused error from the west neighbor (7/16) and from the
        previous row's east (1/16), center (5/16) and west (3/16) neighbors.
      */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /*
        Look up (or compute and memoize) the closest colormap entry.
      */
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
/*
  Riemersma() recursively walks the image along a Hilbert curve of the given
  order (`level`): at each level four sub-curves are generated, joined by
  single RiemersmaDither() steps; at level 1 only the three joining dither
  steps for the current orientation are emitted.  The gravity constants
  name the compass direction of each step.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recurse: four rotated/reflected sub-curves of order level-1, joined by
      one dither step each.  The exact sequence per orientation defines the
      Hilbert curve and must not be reordered.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
/*
  RiemersmaDither() quantizes the single pixel at the cube's current (x,y)
  position — diffusing the weighted error queue into it first — and then
  advances the position one step in `direction` along the Hilbert curve.
  Positions outside the image are skipped but the walk still advances.
  Returns MagickFalse on pixel-access failure or progress cancellation.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error: add the exponentially-weighted queued errors of
        previously visited pixels to this pixel's color.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /*
        Look up the closest colormap entry, computing and caching it on a
        cache miss.
      */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /*
    Step the walk one pixel in the requested compass direction.
  */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  DitherImage() dispatches to Floyd-Steinberg dithering unless the Riemersma
  method was requested, in which case it distributes quantization error
  along a Hilbert curve large enough to cover the image.
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Choose the curve order: the smallest depth whose 2^depth grid covers the
    larger image dimension.
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  depth=1;
  while (i != 0)
  {
    i>>=1;
    depth++;
  }
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* Flush the final pixel of the walk. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initialize the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose a optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Fix: the queue header was allocated but never linked into
            cube_info->node_queue; free it instead of leaking it.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Carve the next node out of the current queue and zero it.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
%    o  normalized_maximum_square_error:  This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Quantization error is only meaningful against a colormap; a DirectClass
    image has none, so report zero error.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  /* alpha/beta default to fully opaque when the image does not blend. */
  alpha=1.0;
  beta=1.0;
  /* Three channels (red, green, blue) contribute per pixel. */
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          /* Weight each side of the comparison by its own alpha. */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /*
        Accumulate per-channel |pixel - colormap| distances: the linear sum
        feeds mean_error_per_pixel, the squared sum feeds the normalized
        mean error, and the largest single distance is tracked separately.
      */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Normalize: per-sample mean, quantum-scaled mean square, and peak. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Initialize quantize_info to its defaults: 256 colors, Riemersma
    dithering, undefined colorspace, and no error measurement.

    Fix: assert the argument before doing anything else, consistent with
    every other entry point in this file (the log call previously ran
    before the precondition was checked).
  */
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread, per-cluster accumulator used by KmeansImage(): quantum-scaled
  channel sums, the number of member pixels, and the summed squared color
  distance (distortion) of those members.
*/
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;
/*
  Release the per-thread K-means accumulators and the pointer array itself;
  always returns NULL so callers can reassign in one statement.  Safe on a
  partially-built set: NULL slots are skipped.
*/
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  register ssize_t
    n;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (kmeans_info[n] != (KmeansInfo *) NULL)
      kmeans_info[n]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[n]);
  return((KmeansInfo **) RelinquishMagickMemory(kmeans_info));
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
/*
  Squared color distance between pixel p and colormap entry q, with each
  color term weighted by channel coverage (alpha and, for CMYK, black).
  The alpha term itself is unweighted; hue-like first channels are folded
  across their half-turn periodicity.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  register double
    delta,
    distance,
    weight;

  weight=1.0;
  distance=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      /* Missing alpha on either side is treated as fully opaque. */
      delta=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ?
        q->alpha : OpaqueAlpha);
      distance+=delta*delta;
      if (image->alpha_trait != UndefinedPixelTrait)
        weight*=QuantumScale*GetPixelAlpha(image,p);
      if (q->alpha_trait != UndefinedPixelTrait)
        weight*=QuantumScale*q->alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      delta=QuantumScale*(GetPixelBlack(image,p)-q->black);
      distance+=weight*delta*delta;
      weight*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      weight*=QuantumScale*(QuantumRange-q->black);
    }
  distance*=3.0;
  delta=QuantumScale*(GetPixelRed(image,p)-q->red);
  if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      /* Hue is periodic: fold differences greater than a half turn. */
      if (fabs((double) delta) > 0.5)
        delta-=0.5;
      delta*=2.0;
    }
  distance+=weight*delta*delta;
  delta=QuantumScale*(GetPixelGreen(image,p)-q->green);
  distance+=weight*delta*delta;
  delta=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  distance+=weight*delta*delta;
  return(distance);
}
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag "Kmeans/Image"
#define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  register ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Seed the clusters either from a user-supplied color list (artifact
    "kmeans:seed-colors") or, by default, from a color quantization pass.
  */
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,  /* shadows the outer `colors' string; used as a counter */
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->colorspace=image->colorspace;
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* depth ~ Log4(number_colors)+2, same rule as QuantizeImage(). */
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          status=SetImageColormap(image,cube_info,exception);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      register const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      status=AcquireImageColormap(image,number_colors,exception);
      if (status == MagickFalse)
        return(status);
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        register const char
          *q;

        /* Locate the end of this semicolon-separated color token. */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            /*
              NOTE(review): the `!= BlendPixelTrait' guard below (and the
              matching guards in the accumulate/centroid loops) randomizes
              alpha only when the image does NOT blend alpha -- this looks
              inverted; confirm the intended semantics before changing.
            */
            if (image->alpha_trait != BlendPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    register ssize_t
      i;

    ssize_t
      y;

    /* Reset the per-thread accumulators for this iteration. */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        register ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          if (min_distance <= MagickEpsilon)
            break;  /* already an effectively exact match */
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        /* Fold this pixel into cluster j's per-thread sums. */
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      /* gamma = 1/count; an empty cluster yields 0, leaving it at black. */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != BlendPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* Converged when the change in distortion drops below the tolerance. */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  /* Push the colormap indexes back out to the pixel channel values. */
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    If the image is colormapped, posterize the colormap entries first.
    NOTE(review): the unbraced `if' below governs only the following
    (possibly OpenMP-parallel) for loop -- intentional but fragile.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Snap each updatable channel to one of `levels' evenly spaced values. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Finish with a conventional quantization to at most levels^3 colors,
    using the caller's dither method.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
%  The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Delete node_info (and, recursively, its surviving children), folding its
  color statistics into its parent and unlinking it from the tree.
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    id;

  size_t
    children;

  /*
    Recurse into any children first so statistics bubble up bottom-up.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[id]);
  /*
    Merge this node's statistics into its parent, then detach it.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Remove every node at exactly the cube's maximum depth, merging each one's
  statistics into its parent via PruneChild().
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    children;

  /*
    Visit the whole subtree before testing this node's level.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Remove every node deeper than cube_info->depth, merging each one's
  statistics into its parent via PruneChild().
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    children;

  /*
    Visit the whole subtree before testing this node's level.
  */
  children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) children; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Reduce the image to at most quantize_info->number_colors colors: classify
  pixels into a color cube, prune it to the target size, then assign each
  pixel its closest surviving color.
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested palette size to [1,MaxColormapSize]; zero means
    "as many as fit".
  */
  maximum_colors=quantize_info->number_colors;
  if ((maximum_colors == 0) || (maximum_colors > MaxColormapSize))
    maximum_colors=MaxColormapSize;
  /*
    A grayscale image without blended alpha takes the fast colormap path.
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      depth=1;
      while (colors != 0)
      {
        colors>>=2;
        depth++;
      }
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image, then map each pixel.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested palette size to [1,MaxColormapSize].
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Classify the colors of every image in the list into one shared cube so
    the whole sequence receives a single common colormap.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    /* Silence per-image progress during classification; restore after. */
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  /*
    Prune any node whose quantization error is at or below the current
    pruning threshold; survivors update the color count and the next
    (minimum) threshold.  Children are visited first so pruning proceeds
    from the leaves upward.
  */
  register ssize_t
    child;

  size_t
    fan_out;

  fan_out=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  for (child=0; child < (ssize_t) fan_out; child++)
    if (node_info->child[child] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[child]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    {
      PruneChild(cube_info,node_info);
      return;
    }
  /*
    Node survives: count it if it uniquely represents colors, and track
    the smallest surviving error as the next pruning threshold.
  */
  if (node_info->number_unique > 0)
    cube_info->colors++;
  if (node_info->quantize_error < cube_info->next_threshold)
    cube_info->next_threshold=node_info->quantize_error;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  /*
    qsort() comparator: order quantization errors ascending, treating
    values within MagickEpsilon of each other as equal.

    The near-equality test must come FIRST: the original ordering
    (test '*p > *q' before the epsilon test) returned 1 when *p exceeded
    *q by less than MagickEpsilon but 0 with the arguments swapped,
    violating the antisymmetry qsort() requires of its comparator
    (undefined behavior per the C standard).
  */
  double
    *p,
    *q;

  p=(double *) error_p;
  q=(double *) error_q;
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(*p > *q ? 1 : -1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;  /* color count at the start of the reduction, for progress */

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten and
        sort all node errors, then jump the starting threshold to the error
        at 110% of the target color count so the first Reduce() pass prunes
        most excess nodes at once instead of one threshold step at a time.
        On allocation failure this acceleration is simply skipped.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  /* prune repeatedly, raising the threshold each pass, until the tree has
     no more than maximum_colors representative nodes */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    /* seed next_threshold above any node error; Reduce() lowers it to the
       minimum error among surviving nodes */
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* classify colors of the REFERENCE image to build the target palette */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign each pixel of the image the closest color from the
        reference palette just classified.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: create a global colormap for the sequence
        instead.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign the reference palette to every image in the sequence;
        stop at the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: order colormap entries by grayscale intensity.
    The double difference is clamped to the int range before truncation.
  */
  double
    delta;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  delta=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (delta < (double) INT_MIN)
    delta=(double) INT_MIN;
  else
    if (delta > (double) INT_MAX)
      delta=(double) INT_MAX;
  return((int) delta);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;  /* deduplicated, intensity-sorted replacement colormap */

  register ssize_t
    i;

  size_t
    extent;  /* size of the intensity -> colormap-index lookup table */

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        DirectClass: build a colormap from the distinct gray intensities
        found in the pixels.  -1 marks "intensity not yet seen".
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
              /* double-checked under the critical section: another thread
                 may have registered this intensity since the outer test */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity and deduplicate it.  Each entry's alpha
    channel temporarily stores its original index so old indices can be
    remapped to new ones after the sort.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    /* old index (stashed in alpha) -> new compacted index */
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* rewrite every pixel's index through the old->new index map */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColormap() traverses the color cube tree and sets the colormap of
% the image. A colormap entry is any node in the color cube tree where the
% of unique colors is not zero.
%
% The format of the SetImageColormap method is:
%
% MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
% ExceptionInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  /* allocate for the larger of requested and classified colors, then let
     DefineImageColormap() fill in the actual entries */
  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors != number_colors)
    {
      /* shrink the colormap to the number of entries actually defined */
      image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
        image->colors+1,sizeof(*image->colormap));
      if (image->colormap == (PixelInfo *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  return(MagickTrue);
}
|
perftest.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "api/libperf.h"
#include "lib/libperf_int.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/sys/sock.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if HAVE_MPI
# include <mpi.h>
#elif HAVE_RTE
# include<rte.h>
#endif
#define MAX_BATCH_FILES 32
#define TL_RESOURCE_NAME_NONE "<none>"
#define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:"
enum {
TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),
TEST_FLAG_PRINT_TEST = UCS_BIT(1),
TEST_FLAG_SET_AFFINITY = UCS_BIT(8),
TEST_FLAG_NUMERIC_FMT = UCS_BIT(9),
TEST_FLAG_PRINT_FINAL = UCS_BIT(10),
TEST_FLAG_PRINT_CSV = UCS_BIT(11)
};
typedef struct sock_rte_group {
int is_server;
int connfd;
} sock_rte_group_t;
typedef struct test_type {
const char *name;
ucx_perf_api_t api;
ucx_perf_cmd_t command;
ucx_perf_test_type_t test_type;
const char *desc;
} test_type_t;
struct perftest_context {
ucx_perf_params_t params;
const char *server_addr;
int port;
int mpi;
unsigned cpu;
unsigned flags;
unsigned num_batch_files;
char *batch_files[MAX_BATCH_FILES];
char *test_names[MAX_BATCH_FILES];
sock_rte_group_t sock_rte_group;
};
/* Registry of all runnable benchmarks: name (as given to -t), API layer
   (UCT transport / UCP protocol), command, test type, and description.
   NULL-name entry terminates the list. */
test_type_t tests[] = {
    {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "active message latency"},

    {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency"},

    {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
     "atomic add latency"},

    {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate"},

    {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / rate"},

    {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / rate"},

    {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / rate"},

    {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "active message bandwidth / message rate"},

    {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth / message rate"},

    {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add message rate"},

    {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag match latency"},

    {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag match bandwidth"},

    {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag sync match latency"},

    {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag sync match bandwidth"},

    {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency"},

    {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth"},

    {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate"},

    {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add bandwidth / message rate"},

    {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / bandwidth / rate"},

    {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / bandwidth / rate"},

    {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / bandwidth / rate"},

    {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "stream bandwidth"},

    {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
     "stream latency"},

    {NULL}
};
/*
 * Transfer exactly `size` bytes over `sock` via `sock_call` (send or recv),
 * polling with a 1ms timeout and invoking the user `progress` callback
 * between attempts.  Returns 0 on success, -1 on error or peer disconnect.
 */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
    size_t total = 0;
    struct pollfd pfd;
    int ret;

    while (total < size) {
        pfd.fd      = sock;
        pfd.events  = poll_events;
        pfd.revents = 0;

        ret = poll(&pfd, 1, 1); /* poll for 1ms */
        if (ret > 0) {
            ucs_assert(ret == 1);
            ucs_assert(pfd.revents & poll_events);

            ret = sock_call(sock, (char*)data + total, size - total, 0);
            if (ret < 0) {
                ucs_error("%s() failed: %m", name);
                return -1;
            }
            if (ret == 0) {
                /* recv() returns 0 when the peer performed an orderly
                 * shutdown; without this check the loop would spin forever
                 * waiting for bytes that will never arrive. */
                ucs_error("%s() returned 0: connection closed", name);
                return -1;
            }
            total += ret;
        } else if ((ret < 0) && (errno != EINTR)) {
            ucs_error("poll(fd=%d) failed: %m", sock);
            return -1;
        }

        /* progress user context */
        if (progress != NULL) {
            progress(arg);
        }
    }
    return 0;
}
/* Blocking-send wrapper: transfer exactly `size` bytes, driving `progress`
 * while waiting.  send() takes 'const void *', so it is cast to the
 * recv-style signature sock_io() expects; the data is never modified. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    typedef ssize_t (*sock_call)(int, void *, size_t, int);

    return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send");
}
/* Blocking-receive wrapper: read exactly `size` bytes, driving `progress`
 * while waiting. */
static int safe_recv(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}
/*
 * Print one row of benchmark results in CSV, locale-aware numeric, or
 * plain format.  In final-only mode (TEST_FLAG_PRINT_FINAL) intermediate
 * reports are suppressed.
 */
static void print_progress(char **test_names, unsigned num_names,
                           const ucx_perf_result_t *result, unsigned flags,
                           int final)
{
    static const char *fmt_csv     = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
    static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n";
    static const char *fmt_plain   = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n";
    const char *fmt;
    unsigned idx;

    if (!(flags & TEST_FLAG_PRINT_RESULTS) ||
        (!final && (flags & TEST_FLAG_PRINT_FINAL)))
    {
        return;
    }

    if (flags & TEST_FLAG_PRINT_CSV) {
        /* CSV rows are prefixed with the batch test names */
        for (idx = 0; idx < num_names; ++idx) {
            printf("%s,", test_names[idx]);
        }
        fmt = fmt_csv;
    } else if (flags & TEST_FLAG_NUMERIC_FMT) {
        fmt = fmt_numeric;
    } else {
        fmt = fmt_plain;
    }

    /* latencies reported in usec, bandwidth in MB/s */
    printf(fmt,
           (double)result->iters,
           result->latency.typical * 1000000.0,
           result->latency.moment_average * 1000000.0,
           result->latency.total_average * 1000000.0,
           result->bandwidth.moment_average / (1024.0 * 1024.0),
           result->bandwidth.total_average / (1024.0 * 1024.0),
           result->msgrate.moment_average,
           result->msgrate.total_average);
    fflush(stdout);
}
/* Print the test description box and/or the results table header,
 * depending on ctx->flags (CSV vs pretty-printed). */
static void print_header(struct perftest_context *ctx)
{
    const char *test_api_str;
    const char *test_data_str;
    test_type_t *test;
    unsigned i;

    if (ctx->flags & TEST_FLAG_PRINT_TEST) {
        /* locate the registry entry matching the selected command/type */
        for (test = tests; test->name; ++test) {
            if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) {
                break;
            }
        }
        if (test->name != NULL) {
            if (test->api == UCX_PERF_API_UCT) {
                test_api_str = "transport layer";
                switch (ctx->params.uct.data_layout) {
                case UCT_PERF_DATA_LAYOUT_SHORT:
                    test_data_str = "short";
                    break;
                case UCT_PERF_DATA_LAYOUT_BCOPY:
                    test_data_str = "bcopy";
                    break;
                case UCT_PERF_DATA_LAYOUT_ZCOPY:
                    test_data_str = "zcopy";
                    break;
                default:
                    test_data_str = "(undefined)";
                    break;
                }
            } else if (test->api == UCX_PERF_API_UCP) {
                test_api_str = "protocol layer";
                test_data_str = "(automatic)"; /* TODO contig/stride/stream */
            } else {
                return;
            }

            printf("+------------------------------------------------------------------------------------------+\n");
            printf("| API:          %-60s               |\n", test_api_str);
            printf("| Test:         %-60s               |\n", test->desc);
            printf("| Data layout:  %-60s               |\n", test_data_str);
            printf("| Send memory:  %-60s               |\n", ucs_memory_type_names[ctx->params.send_mem_type]);
            printf("| Recv memory:  %-60s               |\n", ucs_memory_type_names[ctx->params.recv_mem_type]);
            printf("| Message size: %-60zu               |\n", ucx_perf_get_message_size(&ctx->params));
        }
    }

    if (ctx->flags & TEST_FLAG_PRINT_CSV) {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            /* CSV header: one leading column per batch file */
            for (i = 0; i < ctx->num_batch_files; ++i) {
                printf("%s,", basename(ctx->batch_files[i]));
            }
            printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n");
        }
    } else {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("+--------------+-----------------------------+---------------------+-----------------------+\n");
            printf("|              |       latency (usec)        |   bandwidth (MB/s)  |  message rate (msg/s) |\n");
            printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
            printf("| # iterations | typical | average | overall |  average |  overall |  average  |  overall  |\n");
            printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
        } else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
            printf("+------------------------------------------------------------------------------------------+\n");
        }
    }
}
/*
 * Print a table-separator row with the batch test names overlaid, e.g.
 * "+--test1/test2---+...".  Fixes a buffer overrun in the original: `pos`
 * advanced by the full strlen() even when memcpy() was truncated, so once
 * pos exceeded sizeof(buf) the expression `sizeof(buf) - pos - 1`
 * underflowed (size_t) and subsequent memcpy()/buf[pos++] wrote out of
 * bounds.  `pos` now advances only by the bytes actually copied and the
 * '/' separator is bounds-checked.
 */
static void print_test_name(struct perftest_context *ctx)
{
    char buf[200];
    size_t len;
    unsigned i, pos;

    if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
        strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+");

        pos = 1;
        for (i = 0; i < ctx->num_batch_files; ++i) {
            if ((i != 0) && (pos < sizeof(buf) - 1)) {
                buf[pos++] = '/';
            }
            /* copy at most the space remaining before the terminator */
            len = ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1);
            memcpy(&buf[pos], ctx->test_names[i], len);
            pos += len;
        }

        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("%s\n", buf);
        }
    }
}
/* List the memory types usable with -m: only types for which an allocator
 * module is registered are shown. */
static void print_memory_type_usage(void)
{
    ucs_memory_type_t it;
    for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) {
        if (ucx_perf_mem_type_allocators[it] != NULL) {
            printf("                        %s - %s\n",
                   ucs_memory_type_names[it],
                   ucs_memory_type_descs[it]);
        }
    }
}
/* Print command-line help.  Under MPI, only rank 0 prints to avoid
 * duplicated output.  Defaults shown in parentheses come from ctx. */
static void usage(const struct perftest_context *ctx, const char *program)
{
    static const char* api_names[] = {
        [UCX_PERF_API_UCT] = "UCT",
        [UCX_PERF_API_UCP] = "UCP"
    };
    test_type_t *test;
    int UCS_V_UNUSED rank;

#if HAVE_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (ctx->mpi && (rank != 0)) {
        return;
    }
#endif

#if HAVE_MPI
    printf("  Note: test can be also launched as an MPI application\n");
    printf("\n");
#elif HAVE_RTE
    printf("  Note: this test can be also launched as an libRTE application\n");
    printf("\n");
#endif
    printf("  Usage: %s [ server-hostname ] [ options ]\n", program);
    printf("\n");
    printf("  Common options:\n");
    printf("     -t <test>      test to run:\n");
    for (test = tests; test->name; ++test) {
        printf("    %13s - %s %s\n", test->name,
               api_names[test->api], test->desc);
    }
    printf("\n");
    printf("     -s <size>      list of scatter-gather sizes for single message (%zu)\n",
                                ctx->params.msg_size_list[0]);
    printf("                    for example: \"-s 16,48,8192,8192,14\"\n");
    printf("     -m <send mem type>[,<recv mem type>]\n");
    printf("                    memory type of message for sender and receiver (host)\n");
    print_memory_type_usage();
    printf("     -n <iters>     number of iterations to run (%ld)\n", ctx->params.max_iter);
    printf("     -w <iters>     number of warm-up iterations (%zu)\n",
                                ctx->params.warmup_iter);
    printf("     -c <cpu>       set affinity to this CPU (off)\n");
    printf("     -O <count>     maximal number of uncompleted outstanding sends (%u)\n",
                                ctx->params.max_outstanding);
    printf("     -i <offset>    distance between consecutive scatter-gather entries (%zu)\n",
                                ctx->params.iov_stride);
    printf("     -T <threads>   number of threads in the test (%d), if >1 implies \"-M multi\"\n",
                                ctx->params.thread_count);
    printf("     -B             register memory with NONBLOCK flag\n");
    printf("     -b <file>      read and execute tests from a batch file: every line in the\n");
    printf("                    file is a test to run, first word is test name, the rest of\n");
    printf("                    the line is command-line arguments for the test.\n");
    printf("     -p <port>      TCP port to use for data exchange (%d)\n", ctx->port);
#if HAVE_MPI
    printf("     -P <0|1>       disable/enable MPI mode (%d)\n", ctx->mpi);
#endif
    printf("     -h             show this help message\n");
    printf("\n");
    printf("  Output format:\n");
    printf("     -N             use numeric formatting (thousands separator)\n");
    printf("     -f             print only final numbers\n");
    printf("     -v             print CSV-formatted output\n");
    printf("\n");
    printf("  UCT only:\n");
    printf("     -d <device>    device to use for testing\n");
    printf("     -x <tl>        transport to use for testing\n");
    printf("     -D <layout>    data layout for sender side:\n");
    printf("                        short - short messages (default, cannot be used for get)\n");
    printf("                        bcopy - copy-out (cannot be used for atomics)\n");
    printf("                        zcopy - zero-copy (cannot be used for atomics)\n");
    printf("                        iov    - scatter-gather list (iovec)\n");
    printf("     -W <count>     flow control window size, for active messages (%u)\n",
                                ctx->params.uct.fc_window);
    printf("     -H <size>      active message header size (%zu)\n",
                                ctx->params.am_hdr_size);
    printf("     -A <mode>      asynchronous progress mode (thread_spinlock)\n");
    printf("                        thread_spinlock - separate progress thread with spin locking\n");
    printf("                        thread_mutex - separate progress thread with mutex locking\n");
    printf("                        signal - signal-based timer\n");
    printf("\n");
    printf("  UCP only:\n");
    printf("     -M <thread>    thread support level for progress engine (single)\n");
    printf("                        single     - only the master thread can access\n");
    printf("                        serialized - one thread can access at a time\n");
    printf("                        multi      - multiple threads can access\n");
    printf("     -D <layout>[,<layout>]\n");
    printf("                    data layout for sender and receiver side (contig)\n");
    printf("                        contig - Continuous datatype\n");
    printf("                        iov    - Scatter-gather list\n");
    printf("     -C             use wild-card tag for tag tests\n");
    printf("     -U             force unexpected flow by using tag probe\n");
    printf("     -r <mode>      receive mode for stream tests (recv)\n");
    printf("                        recv       : Use ucp_stream_recv_nb\n");
    printf("                        recv_data  : Use ucp_stream_recv_data_nb\n");
    printf("\n");
    printf("   NOTE: When running UCP tests, transport and device should be specified by\n");
    printf("         environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n");
    printf("\n");
}
/*
 * Parse a UCP datatype layout token ("iov" or "contig") into *datatype.
 * Prefix matching (strncmp) is deliberate: the -D argument may carry a
 * trailing ",<layout>" for the receive side.
 */
static ucs_status_t parse_ucp_datatype_params(const char *optarg,
                                              ucp_perf_datatype_t *datatype)
{
    if (strncmp(optarg, "iov", strlen("iov")) == 0) {
        *datatype = UCP_PERF_DATATYPE_IOV;
        return UCS_OK;
    }

    if (strncmp(optarg, "contig", strlen("contig")) == 0) {
        *datatype = UCP_PERF_DATATYPE_CONTIG;
        return UCS_OK;
    }

    return UCS_ERR_INVALID_PARAM;
}
/* Map a memory-type name (e.g. "host") to its enum value.  Only types
 * with a registered allocator module are accepted. */
static ucs_status_t parse_mem_type(const char *optarg,
                                   ucs_memory_type_t *mem_type)
{
    ucs_memory_type_t it;
    for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) {
        if(!strcmp(optarg, ucs_memory_type_names[it]) &&
           (ucx_perf_mem_type_allocators[it] != NULL)) {
            *mem_type = it;
            return UCS_OK;
        }
    }

    ucs_error("Unsupported memory type: \"%s\"", optarg);
    return UCS_ERR_INVALID_PARAM;
}
/* Parse "-m <send>[,<recv>]"; when the recv type is omitted it defaults
 * to the send type.
 * NOTE(review): strtok() writes into optarg (the const is cast away), so
 * the caller's argv string is modified in place; strtok is also not
 * reentrant — acceptable here since option parsing is single-threaded. */
static ucs_status_t parse_mem_type_params(const char *optarg,
                                          ucs_memory_type_t *send_mem_type,
                                          ucs_memory_type_t *recv_mem_type)
{
    const char *delim = ",";
    char *token      = strtok((char*)optarg, delim);

    if (UCS_OK != parse_mem_type(token, send_mem_type)) {
        return UCS_ERR_INVALID_PARAM;
    }

    token = strtok(NULL, delim);
    if (NULL == token) {
        *recv_mem_type = *send_mem_type;
        return UCS_OK;
    } else {
        return parse_mem_type(token, recv_mem_type);
    }
}
/* Parse the -s comma-separated message-size list into
 * params->msg_size_list / msg_size_cnt.  The existing list is realloc'ed
 * to fit; on a parse error the list is freed and NULLed. */
static ucs_status_t parse_message_sizes_params(const char *optarg,
                                               ucx_perf_params_t *params)
{
    const char delim = ',';
    size_t *msg_size_list, token_num, token_it;
    char *optarg_ptr, *optarg_ptr2;

    optarg_ptr = (char *)optarg;
    token_num  = 0;
    /* count the number of given message sizes */
    while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) {
        ++optarg_ptr;
        ++token_num;
    }
    ++token_num;

    msg_size_list = realloc(params->msg_size_list,
                            sizeof(*params->msg_size_list) * token_num);
    if (NULL == msg_size_list) {
        return UCS_ERR_NO_MEMORY;  /* original list still owned by params */
    }

    params->msg_size_list = msg_size_list;

    optarg_ptr = (char *)optarg;
    errno = 0;  /* cleared so strtoul's ERANGE can be detected reliably */
    for (token_it = 0; token_it < token_num; ++token_it) {
        params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10);
        /* reject overflow, conversion errors, and empty tokens */
        if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) ||
            ((errno != 0) && (params->msg_size_list[token_it] == 0)) ||
            (optarg_ptr == optarg_ptr2)) {
            free(params->msg_size_list);
            params->msg_size_list = NULL; /* prevent double free */
            ucs_error("Invalid option substring argument at position %lu", token_it);
            return UCS_ERR_INVALID_PARAM;
        }
        optarg_ptr = optarg_ptr2 + 1;  /* skip past the delimiter */
    }

    params->msg_size_cnt = token_num;
    return UCS_OK;
}
/* Fill 'params' with default test settings and allocate the initial
 * message-size list (a single 8-byte message). The caller owns and
 * eventually frees params->msg_size_list. */
static ucs_status_t init_test_params(ucx_perf_params_t *params)
{
    memset(params, 0, sizeof(*params));
    /* *_LAST sentinels mean "not selected yet" - set later via -t */
    params->api = UCX_PERF_API_LAST;
    params->command = UCX_PERF_CMD_LAST;
    params->test_type = UCX_PERF_TEST_TYPE_LAST;
    params->thread_mode = UCS_THREAD_MODE_SINGLE;
    params->thread_count = 1;
    params->async_mode = UCS_ASYNC_THREAD_LOCK_TYPE;
    params->wait_mode = UCX_PERF_WAIT_MODE_LAST;
    params->max_outstanding = 1;
    params->warmup_iter = 10000;
    params->am_hdr_size = 8;
    params->alignment = ucs_get_page_size();
    params->max_iter = 1000000l;
    params->max_time = 0.0;
    params->report_interval = 1.0;
    params->flags = UCX_PERF_TEST_FLAG_VERBOSE;
    params->uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW;
    params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
    params->send_mem_type = UCS_MEMORY_TYPE_HOST;
    params->recv_mem_type = UCS_MEMORY_TYPE_HOST;
    params->msg_size_cnt = 1;
    params->iov_stride = 0;
    params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
    params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;
    /* "no device/transport selected" sentinels; overridden by -d / -x */
    strcpy(params->uct.dev_name, TL_RESOURCE_NAME_NONE);
    strcpy(params->uct.tl_name, TL_RESOURCE_NAME_NONE);
    params->msg_size_list = calloc(params->msg_size_cnt,
                                   sizeof(*params->msg_size_list));
    if (params->msg_size_list == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    params->msg_size_list[0] = 8; /* default message size, bytes */
    return UCS_OK;
}
/* Apply one command-line test option ('opt' with its 'optarg') to 'params'.
 * Shared by the top-level command line and by batch-file lines.
 * Returns UCS_OK on success, UCS_ERR_INVALID_PARAM for an unknown option
 * or a bad argument. */
static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg)
{
    test_type_t *test;
    char *optarg2 = NULL;
    switch (opt) {
    case 'd': /* UCT device name */
        ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name),
                          "%s", optarg);
        return UCS_OK;
    case 'x': /* UCT transport name */
        ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name),
                          "%s", optarg);
        return UCS_OK;
    case 't': /* test name - looked up in the static 'tests' table */
        for (test = tests; test->name; ++test) {
            if (!strcmp(optarg, test->name)) {
                params->api = test->api;
                params->command = test->command;
                params->test_type = test->test_type;
                break;
            }
        }
        /* loop ran off the table without a match */
        if (test->name == NULL) {
            ucs_error("Invalid option argument for -t");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'D': /* data layout: UCT keyword, or UCP "send[,recv]" datatypes */
        if (!strcmp(optarg, "short")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
        } else if (!strcmp(optarg, "bcopy")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY;
        } else if (!strcmp(optarg, "zcopy")) {
            params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY;
        } else if (UCS_OK == parse_ucp_datatype_params(optarg,
                                                       &params->ucp.send_datatype)) {
            /* optional ",<recv_datatype>" suffix */
            optarg2 = strchr(optarg, ',');
            if (optarg2) {
                if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1,
                                                        &params->ucp.recv_datatype)) {
                    return UCS_ERR_INVALID_PARAM;
                }
            }
        } else {
            ucs_error("Invalid option argument for -D");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'i':
        params->iov_stride = atol(optarg);
        return UCS_OK;
    case 'n':
        params->max_iter = atol(optarg);
        return UCS_OK;
    case 's': /* comma-separated message size list */
        return parse_message_sizes_params(optarg, params);
    case 'H':
        params->am_hdr_size = atol(optarg);
        return UCS_OK;
    case 'W':
        params->uct.fc_window = atoi(optarg);
        return UCS_OK;
    case 'O':
        params->max_outstanding = atoi(optarg);
        return UCS_OK;
    case 'w':
        params->warmup_iter = atol(optarg);
        return UCS_OK;
    case 'o':
        params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED;
        return UCS_OK;
    case 'B':
        params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK;
        return UCS_OK;
    case 'q': /* quiet - clear the verbose flag set by default */
        params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE;
        return UCS_OK;
    case 'C':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD;
        return UCS_OK;
    case 'U':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE;
        return UCS_OK;
    case 'M': /* thread-safety mode */
        if (!strcmp(optarg, "single")) {
            params->thread_mode = UCS_THREAD_MODE_SINGLE;
            return UCS_OK;
        } else if (!strcmp(optarg, "serialized")) {
            params->thread_mode = UCS_THREAD_MODE_SERIALIZED;
            return UCS_OK;
        } else if (!strcmp(optarg, "multi")) {
            params->thread_mode = UCS_THREAD_MODE_MULTI;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -M");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'T': /* thread count - implies multi-threaded mode */
        params->thread_count = atoi(optarg);
        params->thread_mode = UCS_THREAD_MODE_MULTI;
        return UCS_OK;
    case 'A': /* async progress mode; "thread" is an alias for spinlock */
        if (!strcmp(optarg, "thread") || !strcmp(optarg, "thread_spinlock")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK;
            return UCS_OK;
        } else if (!strcmp(optarg, "thread_mutex")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_MUTEX;
            return UCS_OK;
        } else if (!strcmp(optarg, "signal")) {
            params->async_mode = UCS_ASYNC_MODE_SIGNAL;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -A");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'r': /* stream receive flavor: set or clear the RECV_DATA flag */
        if (!strcmp(optarg, "recv_data")) {
            params->flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        } else if (!strcmp(optarg, "recv")) {
            params->flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        }
        return UCS_ERR_INVALID_PARAM;
    case 'm': /* "send[,recv]" memory types */
        if (UCS_OK != parse_mem_type_params(optarg,
                                            &params->send_mem_type,
                                            &params->recv_mem_type)) {
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    default:
        return UCS_ERR_INVALID_PARAM;
    }
}
/* Read the next non-empty, non-comment line from a batch file, tokenize it
 * into an argv[] and apply its options to 'params' via getopt().
 * The first token is returned through *test_name_p (heap copy, caller frees).
 * Returns UCS_ERR_NO_ELEM at end of file.
 * BUGFIX: the strdup() result is now checked for allocation failure. */
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
                                    int *line_num, ucx_perf_params_t *params,
                                    char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
    ucs_status_t status;
    char buf[MAX_ARG_SIZE];
    int argc;
    char *argv[MAX_SIZE + 1];
    int c;
    char *p;

    do {
        if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
            return UCS_ERR_NO_ELEM;
        }
        ++(*line_num);

        /* split the line into whitespace-separated tokens */
        argc = 0;
        p = strtok(buf, " \t\n\r");
        while (p && (argc < MAX_SIZE)) {
            argv[argc++] = p;
            p = strtok(NULL, " \t\n\r");
        }
        argv[argc] = NULL;
    } while ((argc == 0) || (argv[0][0] == '#')); /* skip blanks and comments */

    /* re-run getopt() over the synthetic argv; optind must be reset */
    optind = 1;
    while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) {
        status = parse_test_params(params, c, optarg);
        if (status != UCS_OK) {
            ucs_error("in batch file '%s' line %d: -%c %s: %s",
                      file_name, *line_num, c, optarg, ucs_status_string(status));
            return status;
        }
    }

    *test_name_p = strdup(argv[0]);
    if (*test_name_p == NULL) {
        return UCS_ERR_NO_MEMORY;
    }
    return UCS_OK;
}
/* Parse the full perftest command line into 'ctx'. Test parameters are
 * initialized to defaults first; options not handled here are forwarded to
 * parse_test_params(). Returns UCS_ERR_CANCELED after printing usage (-h). */
static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized,
                               int argc, char **argv)
{
    ucs_status_t status;
    int c;
    ucs_trace_func("");
    ucx_perf_global_init(); /* initialize memory types */
    status = init_test_params(&ctx->params);
    if (status != UCS_OK) {
        return status;
    }
    ctx->server_addr = NULL;
    ctx->num_batch_files = 0;
    ctx->port = 13337; /* default TCP port for the socket RTE */
    ctx->flags = 0;
    ctx->mpi = mpi_initialized;
    optind = 1;
    while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) {
        switch (c) {
        case 'p': /* TCP port */
            ctx->port = atoi(optarg);
            break;
        case 'b': /* batch file; silently ignored past MAX_BATCH_FILES */
            if (ctx->num_batch_files < MAX_BATCH_FILES) {
                ctx->batch_files[ctx->num_batch_files++] = optarg;
            }
            break;
        case 'N':
            ctx->flags |= TEST_FLAG_NUMERIC_FMT;
            break;
        case 'f':
            ctx->flags |= TEST_FLAG_PRINT_FINAL;
            break;
        case 'v':
            ctx->flags |= TEST_FLAG_PRINT_CSV;
            break;
        case 'c': /* CPU to pin to; applied later in check_system() */
            ctx->flags |= TEST_FLAG_SET_AFFINITY;
            ctx->cpu = atoi(optarg);
            break;
        case 'P':
#if HAVE_MPI
            ctx->mpi = atoi(optarg) && mpi_initialized;
            break;
#endif
            /* NOTE(review): without HAVE_MPI, -P falls through to the help
             * case below - confirm this is the intended behavior */
        case 'h':
            usage(ctx, ucs_basename(argv[0]));
            return UCS_ERR_CANCELED;
        default:
            /* not a global option - try the per-test options */
            status = parse_test_params(&ctx->params, c, optarg);
            if (status != UCS_OK) {
                usage(ctx, ucs_basename(argv[0]));
                return status;
            }
            break;
        }
    }
    if (optind < argc) {
        /* positional argument: server address (selects client mode) */
        ctx->server_addr = argv[optind];
    }
    return UCS_OK;
}
/* The socket-based RTE always connects exactly two processes. */
static unsigned sock_rte_group_size(void *rte_group)
{
    (void)rte_group; /* unused - the group size is fixed */
    return 2;
}
/* Server side is rank 0, client side is rank 1. */
static unsigned sock_rte_group_index(void *rte_group)
{
    sock_rte_group_t *group = rte_group;

    if (group->is_server) {
        return 0;
    }
    return 1;
}
/* Two-process barrier over the control socket: each side sends a magic word
 * and waits for the peer's. Only the OpenMP master thread does the socket
 * I/O; the surrounding OpenMP barriers keep worker threads in step. */
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
                             void *arg)
{
#pragma omp barrier
#pragma omp master
    {
        sock_rte_group_t *group = rte_group;
        const unsigned magic = 0xdeadbeef;
        unsigned sync;
        /* send our token, then receive and verify the peer's */
        sync = magic;
        safe_send(group->connfd, &sync, sizeof(unsigned), progress, arg);
        sync = 0;
        safe_recv(group->connfd, &sync, sizeof(unsigned), progress, arg);
        ucs_assert(sync == magic);
    }
#pragma omp barrier
}
/* Push an iovec to the peer: first the total byte count, then each entry's
 * payload in order. The request handle is unused by this transport. */
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
                              int iovcnt, void **req)
{
    sock_rte_group_t *group = rte_group;
    size_t total = 0;
    int idx;

    for (idx = 0; idx < iovcnt; ++idx) {
        total += iovec[idx].iov_len;
    }
    safe_send(group->connfd, &total, sizeof(total), NULL, NULL);

    for (idx = 0; idx < iovcnt; ++idx) {
        safe_send(group->connfd, iovec[idx].iov_base, iovec[idx].iov_len, NULL,
                  NULL);
    }
}
/* Receive a message posted by the peer's sock_rte_post_vec(): read the size
 * header, check it fits in 'buffer', then read the payload. A receive from
 * ourselves is a no-op. */
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
                          size_t max, void *req)
{
    sock_rte_group_t *group = rte_group;
    int self_index;
    size_t incoming;

    self_index = sock_rte_group_index(rte_group);
    if (src == self_index) {
        return;
    }

    /* with exactly two members, the sender must be the other rank */
    ucs_assert_always(src == (1 - self_index));

    safe_recv(group->connfd, &incoming, sizeof(incoming), NULL, NULL);
    ucs_assert_always(incoming <= max);
    safe_recv(group->connfd, buffer, incoming, NULL, NULL);
}
/* Forward a measurement report to the shared progress printer; 'arg' is the
 * perftest context registered in setup_sock_rte(). */
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
                            void *arg, int is_final)
{
    struct perftest_context *pctx = arg;

    print_progress(pctx->test_names, pctx->num_batch_files, result,
                   pctx->flags, is_final);
}
/* RTE callback table for the two-process socket transport. exchange_vec is
 * a no-op (ucs_empty_function) because sock_rte_post_vec() already pushes
 * the data to the peer. */
static ucx_perf_rte_t sock_rte = {
    .group_size = sock_rte_group_size,
    .group_index = sock_rte_group_index,
    .barrier = sock_rte_barrier,
    .post_vec = sock_rte_post_vec,
    .recv = sock_rte_recv,
    .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function,
    .report = sock_rte_report,
};
/* Establish the socket runtime environment.
 *  - No server address (server side): bind/listen/accept one peer, then
 *    receive the peer's test parameters and message-size list.
 *  - Server address given (client side): connect and send our parameters.
 * On success the connected fd is stored in ctx->sock_rte_group and the
 * sock_rte callback table is installed in ctx->params. */
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
    struct sockaddr_in inaddr;
    struct hostent *he;
    ucs_status_t status;
    int optval = 1;
    int sockfd, connfd;
    int ret;
    sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) {
        ucs_error("socket() failed: %m");
        status = UCS_ERR_IO_ERROR;
        goto err;
    }
    if (ctx->server_addr == NULL) {
        /* server side */
        optval = 1;
        status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
                                   &optval, sizeof(optval));
        if (status != UCS_OK) {
            goto err_close_sockfd;
        }
        inaddr.sin_family = AF_INET;
        inaddr.sin_port = htons(ctx->port);
        inaddr.sin_addr.s_addr = INADDR_ANY;
        memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
        ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
        if (ret < 0) {
            ucs_error("bind() failed: %m");
            status = UCS_ERR_INVALID_ADDR;
            goto err_close_sockfd;
        }
        ret = listen(sockfd, 10);
        if (ret < 0) {
            ucs_error("listen() failed: %m");
            status = UCS_ERR_IO_ERROR;
            goto err_close_sockfd;
        }
        printf("Waiting for connection...\n");
        /* Accept next connection */
        connfd = accept(sockfd, NULL, NULL);
        if (connfd < 0) {
            ucs_error("accept() failed: %m");
            status = UCS_ERR_IO_ERROR;
            goto err_close_sockfd;
        }
        /* listening socket no longer needed once a peer is connected */
        close(sockfd);
        /* receive the client's parameter block ... */
        ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ret) {
            status = UCS_ERR_IO_ERROR;
            goto err_close_connfd;
        }
        if (ctx->params.msg_size_cnt) {
            /* ... the received list pointer is meaningless in this process,
             * so allocate a local list and receive its contents */
            ctx->params.msg_size_list = calloc(ctx->params.msg_size_cnt,
                                               sizeof(*ctx->params.msg_size_list));
            if (NULL == ctx->params.msg_size_list) {
                status = UCS_ERR_NO_MEMORY;
                goto err_close_connfd;
            }
            ret = safe_recv(connfd, ctx->params.msg_size_list,
                            sizeof(*ctx->params.msg_size_list) *
                            ctx->params.msg_size_cnt,
                            NULL, NULL);
            if (ret) {
                status = UCS_ERR_IO_ERROR;
                goto err_close_connfd;
            }
        }
        ctx->sock_rte_group.connfd = connfd;
        ctx->sock_rte_group.is_server = 1;
    } else {
        /* client side: resolve the server name and connect */
        he = gethostbyname(ctx->server_addr);
        if (he == NULL || he->h_addr_list == NULL) {
            ucs_error("host %s not found: %s", ctx->server_addr,
                      hstrerror(h_errno));
            status = UCS_ERR_INVALID_ADDR;
            goto err_close_sockfd;
        }
        inaddr.sin_family = he->h_addrtype;
        inaddr.sin_port = htons(ctx->port);
        ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
        /* NOTE(review): h_addr_list[0] is not checked for NULL - an empty
         * address list would dereference NULL here; confirm it cannot occur */
        memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
        memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
        ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
        if (ret < 0) {
            ucs_error("connect() failed: %m");
            status = UCS_ERR_UNREACHABLE;
            goto err_close_sockfd;
        }
        /* push our parameters and message sizes to the server */
        safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ctx->params.msg_size_cnt) {
            safe_send(sockfd, ctx->params.msg_size_list,
                      sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
                      NULL, NULL);
        }
        ctx->sock_rte_group.connfd = sockfd;
        ctx->sock_rte_group.is_server = 0;
    }
    /* server prints the test name, client prints the results */
    if (ctx->sock_rte_group.is_server) {
        ctx->flags |= TEST_FLAG_PRINT_TEST;
    } else {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }
    ctx->params.rte_group = &ctx->sock_rte_group;
    ctx->params.rte = &sock_rte;
    ctx->params.report_arg = ctx;
    return UCS_OK;
err_close_connfd:
    close(connfd);
    goto err;
err_close_sockfd:
    close(sockfd);
err:
    return status;
}
/* Tear down the socket RTE by closing the data connection. */
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
    int fd = ctx->sock_rte_group.connfd;

    close(fd);
    return UCS_OK;
}
#if HAVE_MPI
/* Group size is the size of MPI_COMM_WORLD. */
static unsigned mpi_rte_group_size(void *rte_group)
{
    int nranks;

    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    return (unsigned)nranks;
}
/* Our index is our rank in MPI_COMM_WORLD. */
static unsigned mpi_rte_group_index(void *rte_group)
{
    int my_rank;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    return (unsigned)my_rank;
}
/*
 * Naive non-blocking barrier implementation over send/recv, to call user
 * progress while waiting for completion.
 * Not using MPI_Ibarrier to be compatible with MPI-1.
 */
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
    int group_size, my_rank, i;
    MPI_Request *reqs;
    int nreqs = 0;
    int dummy;
    int flag;

#pragma omp barrier

#pragma omp master
    /* BUGFIX: the whole exchange must be one structured block. Without these
     * braces, '#pragma omp master' applied only to the first statement
     * (MPI_Comm_rank), so every OpenMP thread executed the sends/receives.
     * The sibling sock_rte_barrier()/ext_rte_barrier() already brace this. */
    {
        MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &group_size);

        /* allocate maximal possible number of requests */
        reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);

        if (my_rank == 0) {
            /* root gathers "ping" from all other ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Irecv(&dummy, 0, MPI_INT,
                          i /* source */,
                          1 /* tag */,
                          MPI_COMM_WORLD,
                          &reqs[nreqs++]);
            }
        } else {
            /* every non-root rank sends "ping" and waits for "pong" */
            MPI_Send(&dummy, 0, MPI_INT,
                     0 /* dest */,
                     1 /* tag */,
                     MPI_COMM_WORLD);
            MPI_Irecv(&dummy, 0, MPI_INT,
                      0 /* source */,
                      2 /* tag */,
                      MPI_COMM_WORLD,
                      &reqs[nreqs++]);
        }

        /* Waiting for receive requests */
        do {
            MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
            progress(arg);
        } while (!flag);

        if (my_rank == 0) {
            /* root sends "pong" to all ranks */
            for (i = 1; i < group_size; ++i) {
                MPI_Send(&dummy, 0, MPI_INT,
                         i /* dest */,
                         2 /* tag */,
                         MPI_COMM_WORLD);
            }
        }
    }
#pragma omp barrier
}
/* Broadcast the iovec payload to every other rank with plain MPI sends.
 * The last iov entry is sent with tag 1 so the receiver knows when to stop
 * (see mpi_rte_recv()). */
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
                             int iovcnt, void **req)
{
    int nranks, self;
    int peer, idx;

    MPI_Comm_rank(MPI_COMM_WORLD, &self);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    for (peer = 0; peer < nranks; ++peer) {
        if (peer == self) {
            continue;
        }
        for (idx = 0; idx < iovcnt; ++idx) {
            MPI_Send(iovec[idx].iov_base, iovec[idx].iov_len, MPI_BYTE, peer,
                     idx == (iovcnt - 1), /* Send last iov with tag == 1 */
                     MPI_COMM_WORLD);
        }
    }

    *req = (void*)(uintptr_t)1;
}
/* Receive a message from rank 'src' into 'buffer' (at most 'max' bytes),
 * accumulating chunks until the final chunk arrives (tag == 1, matching the
 * tagging protocol of mpi_rte_post_vec()). A receive from ourselves is a
 * no-op. */
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
                         void *req)
{
    MPI_Status status;
    size_t offset;
    int my_rank;
    int count;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (src == my_rank) {
        return;
    }

    offset = 0;
    do {
        ucs_assert_always(offset < max);
        /* BUGFIX: cast to char* - pointer arithmetic on void* is a GNU
         * extension, not ISO C */
        MPI_Recv((char*)buffer + offset, max - offset, MPI_BYTE, src,
                 MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE, &count);
        offset += count;
    } while (status.MPI_TAG != 1);
}
/* Forward a measurement report to the shared progress printer; 'arg' is the
 * perftest context registered in setup_mpi_rte(). */
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *pctx = arg;

    print_progress(pctx->test_names, pctx->num_batch_files, result,
                   pctx->flags, is_final);
}
static ucx_perf_rte_t mpi_rte = {
.group_size = mpi_rte_group_size,
.group_index = mpi_rte_group_index,
.barrier = mpi_rte_barrier,
.post_vec = mpi_rte_post_vec,
.recv = mpi_rte_recv,
.exchange_vec = (void*)ucs_empty_function,
.report = mpi_rte_report,
};
#elif HAVE_RTE
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
void *arg)
{
#pragma omp barrier
#pragma omp master
{
rte_group_t group = (rte_group_t)rte_group;
int rc;
rc = rte_barrier(group);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_barrier");
}
}
#pragma omp barrier
}
/* Publish the iovec contents under key "KEY_PERF" in a new SRS session; the
 * session handle is returned through *req for the later exchange_vec/recv
 * callbacks. */
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
                             int iovcnt, void **req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session;
    rte_iovec_t *r_vec;
    int i, rc;
    rc = rte_srs_session_create(group, 0, &session);
    if (RTE_SUCCESS != rc) {
        /* NOTE(review): execution continues with a possibly invalid session
         * handle after this error - confirm whether an early return is
         * required */
        ucs_error("Failed to rte_srs_session_create");
    }
    r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
    if (r_vec == NULL) {
        /* NOTE(review): returns without destroying the session and without
         * setting *req, leaving the caller's request uninitialized */
        return;
    }
    /* wrap each iov entry as an RTE byte vector */
    for (i = 0; i < iovcnt; ++i) {
        r_vec[i].iov_base = iovec[i].iov_base;
        r_vec[i].type = rte_datatype_uint8_t;
        r_vec[i].count = iovec[i].iov_len;
    }
    rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_set_data");
    }
    *req = session;
    free(r_vec);
}
/* Fetch the blob published by rank 'src' under "KEY_PERF" from the SRS
 * session created in ext_rte_post_vec(), unpack up to 'max' bytes into
 * 'buffer', then destroy the session and free the library buffer. */
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session = (rte_srs_session_t)req;
    void *rte_buffer = NULL;
    rte_iovec_t r_vec;
    uint32_t offset;
    int size;
    int rc;
    rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
                          "KEY_PERF", &rte_buffer, &size);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_get_data");
        return;
    }
    /* describe the caller's buffer as a byte vector and unpack into it */
    r_vec.iov_base = buffer;
    r_vec.type = rte_datatype_uint8_t;
    r_vec.count = max;
    offset = 0;
    rte_unpack(&r_vec, rte_buffer, &offset);
    rc = rte_srs_session_destroy(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_destroy");
    }
    free(rte_buffer);
}
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
rte_srs_session_t session = (rte_srs_session_t)req;
int rc;
rc = rte_srs_exchange_data(session);
if (RTE_SUCCESS != rc) {
ucs_error("Failed to rte_srs_exchange_data");
}
}
/* Forward a measurement report to the shared progress printer; 'arg' is the
 * perftest context registered in setup_mpi_rte(). */
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *pctx = arg;

    print_progress(pctx->test_names, pctx->num_batch_files, result,
                   pctx->flags, is_final);
}
/* RTE callback table for the external RTE library transport. */
static ucx_perf_rte_t ext_rte = {
    .group_size = ext_rte_group_size,
    .group_index = ext_rte_group_index,
    .barrier = ext_rte_barrier,
    .report = ext_rte_report,
    .post_vec = ext_rte_post_vec,
    .recv = ext_rte_recv,
    .exchange_vec = ext_rte_exchange_vec,
};
#endif
/* Install the MPI-based RTE (requires exactly 2 ranks) or the external RTE
 * library, depending on build configuration. Rank/peer 1 is the side that
 * prints results. */
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
    ucs_trace_func("");
#if HAVE_MPI
    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 2) {
        ucs_error("This test should run with exactly 2 processes (actual: %d)", size);
        return UCS_ERR_INVALID_PARAM;
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 1) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }
    /* mpi_rte callbacks query MPI_COMM_WORLD directly, no group object */
    ctx->params.rte_group = NULL;
    ctx->params.rte = &mpi_rte;
    ctx->params.report_arg = ctx;
#elif HAVE_RTE
    rte_group_t group;
    rte_init(NULL, NULL, &group);
    if (1 == rte_group_rank(group)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }
    ctx->params.rte_group = group;
    ctx->params.rte = &ext_rte;
    ctx->params.report_arg = ctx;
#endif
    return UCS_OK;
}
/* Counterpart of setup_mpi_rte(): only the external RTE library needs an
 * explicit finalize call; MPI itself is finalized in main(). */
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
    (void)ctx; /* nothing per-context to release */
#if HAVE_RTE
    rte_finalize();
#endif
    return UCS_OK;
}
/* Verify/configure CPU affinity. With -c: pin the process to the requested
 * CPU. Otherwise, warn when the process is bound to more than 2 CPUs since
 * an unpinned run may produce noisy measurements.
 * BUGFIX: corrected the error-message typo "ot of range" -> "out of range". */
static ucs_status_t check_system(struct perftest_context *ctx)
{
    ucs_sys_cpuset_t cpuset;
    unsigned i, count, nr_cpus;
    int ret;

    ucs_trace_func("");

    ret = sysconf(_SC_NPROCESSORS_CONF);
    if (ret < 0) {
        ucs_error("failed to get local cpu count: %m");
        return UCS_ERR_INVALID_PARAM;
    }
    nr_cpus = ret;

    memset(&cpuset, 0, sizeof(cpuset));
    if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
        if (ctx->cpu >= nr_cpus) {
            ucs_error("cpu (%u) out of range (0..%u)", ctx->cpu, nr_cpus - 1);
            return UCS_ERR_INVALID_PARAM;
        }
        CPU_SET(ctx->cpu, &cpuset);

        ret = ucs_sys_setaffinity(&cpuset);
        if (ret) {
            ucs_warn("sched_setaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }
    } else {
        ret = ucs_sys_getaffinity(&cpuset);
        if (ret) {
            ucs_warn("sched_getaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }

        /* count how many CPUs the process may run on */
        count = 0;
        for (i = 0; i < CPU_SETSIZE; ++i) {
            if (CPU_ISSET(i, &cpuset)) {
                ++count;
            }
        }
        if (count > 2) {
            ucs_warn("CPU affinity is not set (bound to %u cpus)."
                     " Performance may be impacted.", count);
        }
    }

    return UCS_OK;
}
/* Deep-copy 'src' into 'dest': shallow struct copy plus a private copy of
 * the message-size list, so the clone can be freed independently. */
static ucs_status_t clone_params(ucx_perf_params_t *dest,
                                 const ucx_perf_params_t *src)
{
    size_t list_bytes;

    *dest = *src;
    list_bytes = dest->msg_size_cnt * sizeof(*dest->msg_size_list);

    dest->msg_size_list = malloc(list_bytes);
    if (dest->msg_size_list == NULL) {
        /* malloc(0) may legally return NULL - that is not a failure */
        return (list_bytes != 0) ? UCS_ERR_NO_MEMORY : UCS_OK;
    }

    memcpy(dest->msg_size_list, src->msg_size_list, list_bytes);
    return UCS_OK;
}
/* Run the configured test, recursing through batch files: each depth applies
 * one batch file's per-line option overrides on a clone of the parent
 * parameters; at the deepest level the accumulated parameters are executed
 * with ucx_perf_run(). Each clone owns its msg_size_list and is freed after
 * its line has run. */
static ucs_status_t run_test_recurs(struct perftest_context *ctx,
                                    ucx_perf_params_t *parent_params,
                                    unsigned depth)
{
    ucx_perf_params_t params;
    ucx_perf_result_t result;
    ucs_status_t status;
    FILE *batch_file;
    int line_num;
    ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files);
    if (parent_params->api == UCX_PERF_API_UCP) {
        /* -d/-x select UCT resources and have no effect on UCP tests */
        if (strcmp(parent_params->uct.dev_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.dev_name);
        }
        if (strcmp(parent_params->uct.tl_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.tl_name);
        }
    }
    if (depth >= ctx->num_batch_files) {
        /* leaf: all batch files applied - execute the measurement */
        print_test_name(ctx);
        return ucx_perf_run(parent_params, &result);
    }
    batch_file = fopen(ctx->batch_files[depth], "r");
    if (batch_file == NULL) {
        ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]);
        return UCS_ERR_IO_ERROR;
    }
    status = clone_params(&params, parent_params);
    if (status != UCS_OK) {
        goto out;
    }
    line_num = 0;
    while ((status = read_batch_file(batch_file, ctx->batch_files[depth],
                                     &line_num, &params,
                                     &ctx->test_names[depth])) == UCS_OK) {
        /* NOTE(review): the recursive call's status is ignored so remaining
         * batch lines still run - confirm this is intended */
        run_test_recurs(ctx, &params, depth + 1);
        free(params.msg_size_list);
        free(ctx->test_names[depth]);
        ctx->test_names[depth] = NULL; /* avoid a dangling name on next line */
        /* fresh clone for the next batch line */
        status = clone_params(&params, parent_params);
        if (status != UCS_OK) {
            goto out;
        }
    }
    if (status == UCS_ERR_NO_ELEM) {
        status = UCS_OK; /* end of batch file is not an error */
    }
    free(params.msg_size_list);
out:
    fclose(batch_file);
    return status;
}
/* Print the report header, then run the (possibly batched) test tree. */
static ucs_status_t run_test(struct perftest_context *ctx)
{
    ucs_status_t rc;

    ucs_trace_func("");

    /* NOTE(review): presumably set for number formatting in reports -
     * confirm which output depends on the locale */
    setlocale(LC_ALL, "en_US");
    print_header(ctx);

    rc = run_test_recurs(ctx, &ctx->params, 0);
    if (rc != UCS_OK) {
        ucs_error("Failed to run test: %s", ucs_status_string(rc));
    }
    return rc;
}
/* Entry point: parse options, pick the runtime (MPI / external RTE / plain
 * sockets), check CPU affinity, run the test(s), and clean up. */
int main(int argc, char **argv)
{
    struct perftest_context ctx;
    ucs_status_t status;
    int mpi_initialized;
    int mpi_rte;
    int ret;
#if HAVE_MPI
    /* initialize MPI only for non-interactive runs (e.g. under mpirun);
     * MPI_Init() returning 0 (MPI_SUCCESS) means success */
    mpi_initialized = !isatty(0) && (MPI_Init(&argc, &argv) == 0);
#else
    mpi_initialized = 0;
#endif
    /* Parse command line */
    status = parse_opts(&ctx, mpi_initialized, argc, argv);
    if (status != UCS_OK) {
        /* -h (UCS_ERR_CANCELED) is a clean exit, anything else is an error */
        ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
        goto out;
    }
#ifdef __COVERITY__
    /* coverity[dont_call] */
    mpi_rte = rand(); /* Shut up deadcode error */
#endif
    /* the external RTE shares the MPI-style setup/cleanup path */
    if (ctx.mpi) {
        mpi_rte = 1;
    } else {
#if HAVE_RTE
        mpi_rte = 1;
#else
        mpi_rte = 0;
#endif
    }
    status = check_system(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }
    /* Create RTE */
    status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }
    /* Run the test */
    status = run_test(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_cleanup_rte;
    }
    ret = 0;
out_cleanup_rte:
    (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out:
    /* msg_size_list is allocated by init_test_params() (called from
     * parse_opts(), which zeroes ctx.params first), so it is either valid
     * or NULL on every path reaching this point */
    if (ctx.params.msg_size_list) {
        free(ctx.params.msg_size_list);
    }
    if (mpi_initialized) {
#if HAVE_MPI
        MPI_Finalize();
#endif
    }
    return ret;
}
|
gooseberry_bin.c | /**
* @file gooseberry_bin.c
* @brief Command line interface for gooseberry
* @author Dominique LaSalle <lasalle@cs.umn.edu>
* Copyright 2014
* @version 1
* @date 2014-04-28
*/
#ifndef GOOSEBERRY_C
#define GOOSEBERRY_C
#include "base.h"
#include "matrix.h"
#include "io.h"
#include "blas.h"
#include "cgd.h"
#include "permute.h"
#include "analyze.h"
/******************************************************************************
* MACROS **********************************************************************
******************************************************************************/
/* Number of elements in a statically-sized array (0 for a zero-sized one). */
#define ARRAY_SIZE(a) \
  (sizeof(a) > 0 ? (sizeof(a) / sizeof((a)[0])) : 0)
/* Default thread count: all available OpenMP threads, or 1 without OpenMP. */
#ifndef NO_OMP
#define DEFAULT_NUMTHREADS omp_get_max_threads()
#else
#define DEFAULT_NUMTHREADS 1
#endif
/******************************************************************************
* TYPES ***********************************************************************
******************************************************************************/
/* COMMANDS ******************************************************************/
/* Top-level subcommands of this binary (see the COMMANDS table below). */
typedef enum command_t {
  COMMAND_HELP,
  COMMAND_ANALYSIS,
  COMMAND_PERMUTE,
  COMMAND_TRANSFORM,
  COMMAND_GENERATE,
  COMMAND_BLAS,
  COMMAND_CGD,
  COMMAND_SGD,
  COMMAND_PAGERANK
} command_t;
/* ANALYSIS ******************************************************************/
/* Kinds of analysis selectable for the 'analysis' command. */
typedef enum analysis_t {
  ANALYSIS_MATRIXSTATS,
  ANALYSIS_CHOLESKY
} analysis_t;
/* Option ids for the 'analysis' command. */
typedef enum analysis_option_t {
  ANALYSIS_OPTION_HELP,
  ANALYSIS_OPTION_INFILE,
  ANALYSIS_OPTION_TIME,
  ANALYSIS_OPTION_TYPE,
  ANALYSIS_OPTION_PERMFILE
} analysis_option_t;
/* PERMUTE *******************************************************************/
/* Option ids for the 'permute' command. */
typedef enum permute_option_t {
  PERMUTE_OPTION_HELP,
  PERMUTE_OPTION_INFILE,
  PERMUTE_OPTION_OUTFILE,
  PERMUTE_OPTION_PERMUTATION,
  PERMUTE_OPTION_TIME,
  PERMUTE_OPTION_ROWPERM,
  PERMUTE_OPTION_COLPERM
} permute_option_t;
/* Kinds of permutation selectable for the 'permute' command. */
typedef enum permute_permutation_t {
  PERMUTE_PERMUTATION_FILE,
  PERMUTE_PERMUTATION_RANDOM,
  PERMUTE_PERMUTATION_ROWRANDOM,
  PERMUTE_PERMUTATION_COLRANDOM,
  PERMUTE_PERMUTATION_BANDWIDTH
} permute_permutation_t;
/* TRANSFORM *****************************************************************/
/* Option ids for the 'transform' command. */
typedef enum transform_option_t {
  TRANSFORM_OPTION_HELP,
  TRANSFORM_OPTION_INFILE,
  TRANSFORM_OPTION_OUTFILE,
  TRANSFORM_OPTION_PARTFILE,
  TRANSFORM_OPTION_TIME,
  TRANSFORM_OPTION_OPERATION
} transform_option_t;
/* Kinds of transform selectable for the 'transform' command. */
typedef enum transform_operation_t {
  TRANSFORM_OPERATION_CONVERT,
  TRANSFORM_OPERATION_SYMMETRIFY,
  TRANSFORM_OPERATION_DEBIPARTIFY,
  TRANSFORM_OPERATION_ROWSPLIT,
  TRANSFORM_OPERATION_COLSPLIT,
  TRANSFORM_OPERATION_ROWJOIN,
  TRANSFORM_OPERATION_COLJOIN,
  TRANSFORM_OPERATION_TRANSPOSE
} transform_operation_t;
/* GENERATE ******************************************************************/
/* Option ids for the 'generate' command. */
typedef enum generate_option_t {
  GENERATE_OPTION_HELP,
  GENERATE_OPTION_OUTFILE,
  GENERATE_OPTION_TYPE,
  GENERATE_OPTION_SIZE,
  GENERATE_OPTION_TIME
} generate_option_t;
/* Kinds of object the 'generate' command can produce. */
typedef enum generate_type_t {
  GENERATE_TYPE_NULL,
  GENERATE_TYPE_DENSE_VECTOR
} generate_type_t;
/* BLAS **********************************************************************/
/* Option ids for the 'blas' command. */
typedef enum blas_option_t {
  BLAS_OPTION_HELP,
  BLAS_OPTION_OPERATION,
  BLAS_OPTION_INFILE,
  BLAS_OPTION_OUTFILE,
  BLAS_OPTION_TIME,
  BLAS_OPTION_RUNS,
  BLAS_OPTION_THREADS,
  BLAS_OPTION_ROWPERM,
  BLAS_OPTION_COLPERM,
  BLAS_OPTION_REDUCEBANDWIDTH
} blas_option_t;
/* Operations selectable for the 'blas' command. */
typedef enum blas_operation_t {
  BLAS_OPERATION_NOOP,
  BLAS_OPERATION_MULTIPLY
} blas_operation_t;
/* CGD ***********************************************************************/
/* Option ids for the 'cgd' command. */
typedef enum cgd_option_t {
  CGD_OPTION_HELP,
  CGD_OPTION_INFILE,
  CGD_OPTION_OUTFILE,
  CGD_OPTION_ERROR,
  CGD_OPTION_NITER,
  CGD_OPTION_TIME,
  CGD_OPTION_RUNS,
  CGD_OPTION_THREADS,
  CGD_OPTION_ROWPERM,
  CGD_OPTION_COLPERM
} cgd_option_t;
/* PAGERANK ******************************************************************/
/* Option ids for the 'pagerank' command. */
typedef enum pagerank_option_t {
  PAGERANK_OPTION_HELP,
  PAGERANK_OPTION_INFILE,
  PAGERANK_OPTION_OUTFILE,
  PAGERANK_OPTION_ERROR,
  PAGERANK_OPTION_DAMPING,
  PAGERANK_OPTION_NITER,
  PAGERANK_OPTION_TIME,
  PAGERANK_OPTION_RUNS,
  PAGERANK_OPTION_THREADS,
  PAGERANK_OPTION_PERM
} pagerank_option_t;
/******************************************************************************
* OPTION ARRAYS ***************************************************************
******************************************************************************/
/* COMMANDS ******************************************************************/
/* Subcommand table, indexed by command_t.
 * BUGFIX: corrected the help-text typo "stocastic" -> "stochastic". */
static const cmd_opt_pair_t COMMANDS[] = {
  [COMMAND_HELP] = {"help","Display list of available commands.", \
      COMMAND_HELP},
  [COMMAND_ANALYSIS] = {"analysis","Perform an analysis on a matrix/vector.", \
      COMMAND_ANALYSIS},
  [COMMAND_TRANSFORM] = {"transform","Transform a matrix/vector.", \
      COMMAND_TRANSFORM},
  [COMMAND_PERMUTE] = {"permute","Permute a matrix/vector.",COMMAND_PERMUTE},
  [COMMAND_GENERATE] = {"generate","Generate a matrix/vector.", \
      COMMAND_GENERATE},
  [COMMAND_BLAS] = {"blas","Perform a blas operation.",COMMAND_BLAS},
  [COMMAND_CGD] = {"cgd","Perform conjugate gradient descent.",COMMAND_CGD},
  [COMMAND_SGD] = {"sgd","Perform stochastic gradient descent.",COMMAND_SGD},
  [COMMAND_PAGERANK] = {"pagerank","Perform a pagerank on a square matrix.", \
      COMMAND_PAGERANK}
};
static const size_t NCOMMANDS = ARRAY_SIZE(COMMANDS);
/* ANALYSIS ******************************************************************/
/* Analyses selectable via the 'type' option of the analysis command. */
static const cmd_opt_pair_t ANALYSIS[] = {
  {"matrixstats","Calculate statistics of a matrix/vector", \
      ANALYSIS_MATRIXSTATS},
  {"cholesky","Calculate the stats associated with a cholesky decomposition", \
      ANALYSIS_CHOLESKY}
};
/* Command-line options accepted by the 'analysis' command. */
static const cmd_opt_t ANALYSIS_OPTS[] = {
  {ANALYSIS_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {ANALYSIS_OPTION_INFILE,'i',"infile","The input matrix.",CMD_OPT_STRING, \
      NULL,0},
  {ANALYSIS_OPTION_PERMFILE,'p',"permfile","The permutation vector.", \
      CMD_OPT_STRING,NULL,0},
  {ANALYSIS_OPTION_TYPE,'a',"type","The type of analysis to perform.", \
      CMD_OPT_CHOICE,ANALYSIS,ARRAY_SIZE(ANALYSIS)},
  /* BUGFIX: this entry used PERMUTE_OPTION_TIME (a different enum with a
   * different value), apparently copy-pasted from PERMUTE_OPTS, so the
   * analysis code would never match its own TIME option id. */
  {ANALYSIS_OPTION_TIME,'t',"times","Print timing of the analysis.", \
      CMD_OPT_FLAG,NULL,0}
};
static const size_t NANALYSIS_OPTS = ARRAY_SIZE(ANALYSIS_OPTS);
/* PERMUTE *******************************************************************/
/* Permutations selectable via the 'permutation' option.
 * BUGFIX: corrected the help-text typo "permuation" -> "permutation". */
static const cmd_opt_pair_t PERMUTE_PERMUTATIONS[] = {
  {"random","Perform a random permutation on the rows and columns.", \
      PERMUTE_PERMUTATION_RANDOM},
  {"file","Perform a permutation based on input files (specified with -R " \
      "and/or -C, can be permutations or partitions).", \
      PERMUTE_PERMUTATION_FILE},
  {"bandwidth","Perform a bandwidth reducing permutation.", \
      PERMUTE_PERMUTATION_BANDWIDTH},
};
/* Command-line options accepted by the 'permute' command. */
static const cmd_opt_t PERMUTE_OPTS[] = {
  {PERMUTE_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {PERMUTE_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \
      CMD_OPT_STRING,NULL,0},
  {PERMUTE_OPTION_OUTFILE,'o',"outfile","The output vector file.", \
      CMD_OPT_STRING,NULL,0},
  {PERMUTE_OPTION_PERMUTATION,'p',"permutation","The type of permutation to " \
      "perform.",CMD_OPT_CHOICE,PERMUTE_PERMUTATIONS, \
      ARRAY_SIZE(PERMUTE_PERMUTATIONS)},
  {PERMUTE_OPTION_TIME,'t',"times","Print timing of the permutation.", \
      CMD_OPT_FLAG,NULL,0},
  {PERMUTE_OPTION_ROWPERM,'R',"rowperm","Row permutation/partition file.", \
      CMD_OPT_STRING,NULL,0},
  {PERMUTE_OPTION_COLPERM,'C',"colperm","Column permutation/partition file.", \
      CMD_OPT_STRING,NULL,0}
};
static const size_t NPERMUTE_OPTS = ARRAY_SIZE(PERMUTE_OPTS);
/* TRANSFROM *****************************************************************/
/* Operations selectable via the 'operation' option of the transform command. */
static const cmd_opt_pair_t TRANSFORM_OPERATIONS[] = {
  {"convert","Convert from one matrix/vector format to another.", \
      TRANSFORM_OPERATION_CONVERT},
  {"symmetrify","Transform to a symmetric matrix: B = A + A^T.", \
      TRANSFORM_OPERATION_SYMMETRIFY},
  {"debipartify","Transform to a symmetric matrix: B = [ 0 , A ; A^T , 0].", \
      TRANSFORM_OPERATION_DEBIPARTIFY},
  {"rowsplit","Split a matrix row-wise into submatrices: " \
      "[ B.0 ; B.1 ; ... ] = A.",TRANSFORM_OPERATION_ROWSPLIT},
  {"colsplit","Split a matrix column-wise into submatrices: " \
      "[ B.0 , B.1 , ... ] = A.",TRANSFORM_OPERATION_COLSPLIT},
#ifdef XXX
  {"rowjoin","Join submatrices row-wise into a single matrix: " \
      "B = [ A.0 ; A.1 ; ... ].",TRANSFORM_OPERATION_ROWJOIN},
  {"coljoin","Join submatrices column-wise into a single matrix: " \
      "B = [ A.0 , A.1 , ... ].",TRANSFORM_OPERATION_COLJOIN},
#endif
  {"transpose","Transpose a matrix: " \
      "B = A^T.",TRANSFORM_OPERATION_TRANSPOSE}
};
/* Command-line options accepted by the 'transform' command.
 * BUGFIX: the 'operation' and 'times' help strings said "permutation"
 * (copy-pasted from PERMUTE_OPTS); they now describe the operation. */
static const cmd_opt_t TRANSFORM_OPTS[] = {
  {TRANSFORM_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {TRANSFORM_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \
      CMD_OPT_STRING,NULL,0},
  {TRANSFORM_OPTION_OUTFILE,'o',"outfile","The output vector file.", \
      CMD_OPT_STRING,NULL,0},
  {TRANSFORM_OPTION_PARTFILE,'p',"partfile","The partition vector file.", \
      CMD_OPT_STRING,NULL,0},
  {TRANSFORM_OPTION_OPERATION,'x',"operation","The type of operation to " \
      "perform.",CMD_OPT_CHOICE,TRANSFORM_OPERATIONS, \
      ARRAY_SIZE(TRANSFORM_OPERATIONS)},
  {TRANSFORM_OPTION_TIME,'t',"times","Print timing of the operation.", \
      CMD_OPT_FLAG,NULL,0}
};
static const size_t NTRANSFORM_OPTS = ARRAY_SIZE(TRANSFORM_OPTS);
/* GENERATE ******************************************************************/
/* Choices for the 'generate' command's -g/--type option. */
static const cmd_opt_pair_t GENERATE_OPERATIONS[] = {
  {"vector","Generate a dense vector.",GENERATE_TYPE_DENSE_VECTOR}
};
/* Command-line options accepted by the 'generate' command. */
static const cmd_opt_t GENERATE_OPTS[] = {
  {GENERATE_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {GENERATE_OPTION_OUTFILE,'o',"outfile","The output vector file.", \
      CMD_OPT_STRING,NULL,0},
  /* fixed: help text said "permutation" (copy-paste from PERMUTE_OPTS) */
  {GENERATE_OPTION_TYPE,'g',"type","The type of matrix/vector to " \
      "generate.",CMD_OPT_CHOICE,GENERATE_OPERATIONS, \
      ARRAY_SIZE(GENERATE_OPERATIONS)},
  /* fixed: grammar ("generate" -> "generated") and missing period */
  {GENERATE_OPTION_SIZE,'s',"size","Size of the generated matrix/vector.", \
      CMD_OPT_INT,NULL,0},
  /* fixed: help text said "permutation"; this option times the generation */
  {GENERATE_OPTION_TIME,'t',"times","Print timing of the generation.", \
      CMD_OPT_FLAG,NULL,0}
};
static const size_t NGENERATE_OPTS = ARRAY_SIZE(GENERATE_OPTS);
/* BLAS **********************************************************************/
/* Choices for the 'blas' command's -x/--operation option. */
static const cmd_opt_pair_t BLAS_OPERATIONS[] = {
  {"noop","Perform no operation, just copy the input matrix/vector.", \
      BLAS_OPERATION_NOOP},
  {"multiply","Multiply a matrix/vector with a matrix/vector.", \
      BLAS_OPERATION_MULTIPLY}
};
/* Command-line options accepted by the 'blas' command.  Multiple -i options
 * may be given (one per operand); -R/-C apply permutations before the
 * operation runs. */
static const cmd_opt_t BLAS_OPTS[] = {
  {BLAS_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {BLAS_OPTION_OPERATION,'x',"operation","The type of operation to perform.", \
      CMD_OPT_CHOICE,BLAS_OPERATIONS,ARRAY_SIZE(BLAS_OPERATIONS)},
  {BLAS_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \
      CMD_OPT_STRING,NULL,0},
  {BLAS_OPTION_OUTFILE,'o',"outfile","The output matrix/vector file.", \
      CMD_OPT_STRING,NULL,0},
  {BLAS_OPTION_TIME,'t',"times","Print timing of the blas routines.", \
      CMD_OPT_FLAG,NULL,0},
  {BLAS_OPTION_RUNS,'r',"runs","Number of repeated runs (only useful for " \
      "timing purposes).",CMD_OPT_INT,NULL,0},
  /* thread-count option only exists in OpenMP builds */
  #ifndef NO_OMP
  {BLAS_OPTION_THREADS,'T',"threads","Number of threads.",CMD_OPT_INT,NULL,0},
  #endif
  {BLAS_OPTION_ROWPERM,'R',"rowperm","Row permutation file.",CMD_OPT_STRING, \
      NULL,0},
  {BLAS_OPTION_COLPERM,'C',"colperm","Column permutation file.", \
      CMD_OPT_STRING,NULL,0},
  {BLAS_OPTION_REDUCEBANDWIDTH,'b',"bandwidthreduce","Re-order the matrix " \
      "to reduce bandwidth.",CMD_OPT_FLAG, NULL,0}
};
static const size_t NBLAS_OPTS = ARRAY_SIZE(BLAS_OPTS);
/* CGD ****************************************************************/
/* Command-line options accepted by the 'cgd' (conjugate gradient descent)
 * command. */
static const cmd_opt_t CGD_OPTS[] = {
  {CGD_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {CGD_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \
      CMD_OPT_STRING,NULL,0},
  {CGD_OPTION_OUTFILE,'o',"outfile","The output vector file.", \
      CMD_OPT_STRING,NULL,0},
  {CGD_OPTION_ERROR,'e',"error","The RMSE to achieve before exiting.", \
      CMD_OPT_FLOAT,NULL,0},
  {CGD_OPTION_NITER,'I',"iter","The number of iterations to run before " \
      "exiting.",CMD_OPT_INT,NULL,0},
  {CGD_OPTION_TIME,'t',"times","Print timing of the cgd routines.", \
      CMD_OPT_FLAG,NULL,0},
  {CGD_OPTION_RUNS,'r',"runs","Number of repeated runs (only useful for " \
      "timing purposes).",CMD_OPT_INT,NULL,0},
  /* thread-count option only exists in OpenMP builds */
  #ifndef NO_OMP
  {CGD_OPTION_THREADS,'T',"threads","Number of threads.",CMD_OPT_INT,NULL,0},
  #endif
  {CGD_OPTION_ROWPERM,'R',"rowperm","Row permutation file.",CMD_OPT_STRING, \
      NULL,0},
  {CGD_OPTION_COLPERM,'C',"colperm","Column permutation file.", \
      CMD_OPT_STRING,NULL,0}
};
static const size_t NCGD_OPTS = ARRAY_SIZE(CGD_OPTS);
/* PAGERANK ******************************************************************/
/* Command-line options accepted by the 'pagerank' command. */
static const cmd_opt_t PAGERANK_OPTS[] = {
  {PAGERANK_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \
      NULL,0},
  {PAGERANK_OPTION_INFILE,'i',"infile","An input matrix file.", \
      CMD_OPT_STRING,NULL,0},
  {PAGERANK_OPTION_OUTFILE,'o',"outfile","The output matrix/vector file.", \
      CMD_OPT_STRING,NULL,0},
  /* fixed: user-visible typo "calcuation" */
  {PAGERANK_OPTION_TIME,'t',"times","Print timing of the pagerank " \
      "calculation.", CMD_OPT_FLAG,NULL,0},
  {PAGERANK_OPTION_RUNS,'r',"runs","Number of repeated runs (only useful " \
      "for timing purposes).",CMD_OPT_INT,NULL,0},
  {PAGERANK_OPTION_NITER,'I',"iterations","Maximum number of iterations.", \
      CMD_OPT_INT,NULL,0},
  {PAGERANK_OPTION_ERROR,'e',"error","Error threshold for stopping.", \
      CMD_OPT_FLOAT,NULL,0},
  {PAGERANK_OPTION_DAMPING,'d',"damping","Damping factor to use.", \
      CMD_OPT_FLOAT,NULL,0},
  /* thread-count option only exists in OpenMP builds */
  #ifndef NO_OMP
  {PAGERANK_OPTION_THREADS,'T',"threads","Number of threads.",CMD_OPT_INT, \
      NULL,0},
  #endif
  {PAGERANK_OPTION_PERM,'p',"perm","Row and column permutation file.", \
      CMD_OPT_STRING,NULL,0}
};
static const size_t NPAGERANK_OPTS = ARRAY_SIZE(PAGERANK_OPTS);
/* FILE TYPES ****************************************************************/
/* NULL-terminated lists of recognized file extensions, one per on-disk
 * format. */
static const char * RAW_EXTENSIONS[] = {"raw",NULL};
static const char * GRID_EXTENSIONS[] = {"mat","grid","vec","txt",NULL};
static const char * CSR_EXTENSIONS[] = {"csr",NULL};
static const char * SVM_EXTENSIONS[] = {"svm","libfm",NULL};
static const char * POINT_EXTENSIONS[] = {"ij","point",NULL};
static const char * GRAPH_EXTENSIONS[] = {"metis","chaco","graph",NULL};
static const char * CLU_EXTENSIONS[] = {"clu",NULL};
static const char * MATRIXMARKET_EXTENSIONS[] = {"mm","mtx",NULL};
/* Extension table indexed by GOOSEBERRY_FORMAT_* code; scanned by
 * __get_file_type() to map a filename to a format. */
static const char * const * const FILE_TYPES[] = {
  [GOOSEBERRY_FORMAT_RAW] = RAW_EXTENSIONS,
  [GOOSEBERRY_FORMAT_GRID] = GRID_EXTENSIONS,
  [GOOSEBERRY_FORMAT_CSR] = CSR_EXTENSIONS,
  [GOOSEBERRY_FORMAT_SVM] = SVM_EXTENSIONS,
  [GOOSEBERRY_FORMAT_POINT] = POINT_EXTENSIONS,
  [GOOSEBERRY_FORMAT_GRAPH] = GRAPH_EXTENSIONS,
  [GOOSEBERRY_FORMAT_CLU] = CLU_EXTENSIONS,
  [GOOSEBERRY_FORMAT_MATRIXMARKET] = MATRIXMARKET_EXTENSIONS
};
/******************************************************************************
* PRIVATE FUNCTIONS ***********************************************************
******************************************************************************/
/* Return 1 if the given GOOSEBERRY_FORMAT_* code denotes a dense on-disk
 * representation (raw binary or whitespace grid), 0 otherwise. */
static int __is_dense(
    int type)
{
  return type == GOOSEBERRY_FORMAT_RAW || type == GOOSEBERRY_FORMAT_GRID;
}
/* Map a filename to a GOOSEBERRY_FORMAT_* code by matching its extension
 * against FILE_TYPES.  Returns -1 if no extension matches. */
static int __get_file_type(
    const char * const name)
{
  size_t t;
  for (t=0;t<ARRAY_SIZE(FILE_TYPES);++t) {
    const char * const * ext;
    /* walk the NULL-terminated extension list for this format */
    for (ext=FILE_TYPES[t];*ext != NULL;++ext) {
      if (dl_string_endswith(name,*ext)) {
        return t;
      }
    }
  }
  return -1;
}
/* Print the top-level usage message and the list of available commands to
 * 'fout'.  Always returns 1. */
static int __usage(
    const char * const name,
    FILE * fout)
{
  size_t c;
  fprintf(fout,"USAGE:\n%s <command> [options]\n\nCommands:\n",name);
  for (c=0;c<NCOMMANDS;++c) {
    fprintf(fout,"\t%s : %s\n",COMMANDS[c].str,COMMANDS[c].desc);
  }
  return 1;
}
/* Print the usage header and option list for a sub-command to 'fout'.
 * Always returns 1.
 *
 * Bug fix: the three header fprintf calls previously wrote to stdout
 * regardless of the 'fout' argument, so callers passing stderr (see the
 * nargs < 2 paths in the command functions) got output split across two
 * streams. */
static int __command_usage(
    const char * const name,
    const char * const cmd,
    const cmd_opt_t * const opts,
    const size_t nopts,
    FILE * fout)
{
  fprintf(fout,"USAGE:\n");
  fprintf(fout,"%s %s [options]\n",name,cmd);
  fprintf(fout,"\n");
  fprint_cmd_opts(fout,opts,nopts);
  return 1;
}
/* COMMAND FUNCTIONS *********************************************************/
/* The 'help' command: print the top-level usage to stdout. */
static int __help(
    int argc,
    char ** argv)
{
  (void)argc; /* unused; signature matches the command dispatch table */
  __usage(argv[0],stdout);
  return GOOSEBERRY_SUCCESS;
}
/* The 'analyze' command: read a matrix/vector file and print either basic
 * matrix statistics or a Cholesky fill-in analysis.  An optional label file
 * (-p) clusters/permutes the rows and columns before analysis.
 * Returns GOOSEBERRY_SUCCESS or a GOOSEBERRY_ERROR_* code. */
static int __analyze(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs, i;
  dim_t k, nec, ner, prows;
  ind_t j, nnz;
  real_t v, minvalue, maxvalue;
  dim_t * rowsize, * colsize, * perm, * order, * pk = NULL;
  int times, err, type, analysis;
  cmd_arg_t * args = NULL;
  matrix_t * mat = NULL;
  char const * matfile = NULL, * pfile = NULL;
  double nops;
  /* set defaults */
  times = 0;
  analysis = ANALYSIS_MATRIXSTATS;
  /* argv[0] is the program name and argv[1] the command; parse the rest */
  err = cmd_parse_args(argc-2,argv+2,ANALYSIS_OPTS,NANALYSIS_OPTS,&args, \
      &nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;
  if (nargs < 2) {
    __command_usage(argv[0],argv[1],ANALYSIS_OPTS,NANALYSIS_OPTS,stderr);
    goto END;
  }
  /* process the parsed arguments */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case ANALYSIS_OPTION_HELP:
        __command_usage(argv[0],argv[1],ANALYSIS_OPTS,NANALYSIS_OPTS,stdout);
        goto END;
        break;
      case ANALYSIS_OPTION_TYPE:
        analysis = (analysis_t)args[i].val.o;
        break;
      case ANALYSIS_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Too many input files specified\n");
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case ANALYSIS_OPTION_PERMFILE:
        pfile = args[i].val.s;
        break;
      case ANALYSIS_OPTION_TIME:
        times = 1;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (matfile == NULL) {
    eprintf("You must specify a matrix/vector input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }
  /* read in input files */
  type = __get_file_type(matfile);
  if (type < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_alloc(1);
    memset(mat,0,sizeof(matrix_t));
    if (__is_dense(type)) {
      err = gooseberry_read_dense_matrix(type,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      /* a single row or column is treated as a vector */
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(type,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  /* optionally read a row label/cluster file; it must have one label per
   * matrix row */
  if (pfile) {
    prows = mat->nrows;
    err = gooseberry_read_labels(pfile,&prows,&pk);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
    if (prows != mat->nrows) {
      eprintf("Matrix is "PF_DIM_T"x"PF_DIM_T" but permutation file has " \
          PF_DIM_T" rows.\n",mat->nrows,mat->ncols,prows);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
    }
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&op_tmr);
  }
  if (pk) {
    /* counting-sort row indices by label to build a permutation that
     * clusters same-labelled rows, then apply it symmetrically */
    perm = dim_alloc(mat->nrows);
    order = dim_alloc(mat->nrows);
    dim_incset(order,0,1,mat->nrows);
    dd_countingsort_kv(pk,order,0,mat->nrows,mat->nrows,perm,NULL);
    matrix_permute(mat,perm,perm);
    dl_free(order);
    dl_free(perm);
  }
  /* NOTE(review): rowptr is only populated for sparse inputs; a dense input
   * file would leave rowptr NULL and crash here -- confirm dense inputs are
   * not expected for this command. */
  nnz = mat->rowptr[mat->nrows];
  switch (analysis) {
    case ANALYSIS_MATRIXSTATS:
      if (nnz > 0) {
        minvalue = maxvalue = mat->rowval[0];
      } else {
        minvalue = maxvalue = 0;
      }
      rowsize = dim_alloc(mat->nrows);
      colsize = dim_init_alloc(0,mat->ncols);
      /* count non-zeros per row and empty rows */
      ner = 0;
      for (i=0;i<mat->nrows;++i) {
        rowsize[i] = mat->rowptr[i+1] - mat->rowptr[i];
        if (rowsize[i] == 0) {
          ++ner;
        }
      }
      /* count non-zeros per column and track the value range */
      for (i=0;i<mat->nrows;++i) {
        for (j=mat->rowptr[i];j<mat->rowptr[i+1];++j) {
          k = mat->rowind[j];
          v = mat->rowval[j];
          ++colsize[k];
          if (v < minvalue) {
            minvalue = v;
          }
          if (v > maxvalue) {
            maxvalue = v;
          }
        }
      }
      nec = 0;
      for (i=0;i<mat->ncols;++i) {
        if (colsize[i] == 0) {
          ++nec;
        }
      }
      printf("Number of rows = %16zu\n",(size_t)mat->nrows);
      printf("Number of columns = %16zu\n",(size_t)mat->ncols);
      printf("Number of non-zeros = %16zu\n",(size_t)mat->rowptr[mat->nrows]);
      printf("Median nnz / row = %16zu\n", \
          (size_t)dim_median(rowsize,mat->nrows));
      printf("Mean nnz / row = %16.3lf\n", \
          dim_arithmetic_mean(rowsize,mat->nrows));
      printf("Median nnz / column = %16zu\n", \
          (size_t)dim_median(colsize,mat->ncols));
      printf("Mean nnz / column = %16.3lf\n", \
          dim_arithmetic_mean(colsize,mat->ncols));
      printf("Maximum value = %16.3lf\n",maxvalue);
      printf("Minimum value = %16.3lf\n",minvalue);
      printf("Number of empty rows = %16zu\n",(size_t)ner);
      printf("Number of empty columns = %16zu\n",(size_t)nec);
      dl_free(rowsize);
      dl_free(colsize);
      break;
    case ANALYSIS_CHOLESKY:
      /* estimate fill-in and flop count of a Cholesky factorization */
      analyze_cholesky(mat->nrows,mat->rowptr,mat->rowind,&nnz,&nops);
      printf("Number of non-zeroes = "PF_IND_T"\n",nnz);
      printf("Number of operations = %g\n",nops);
      break;
    default:
      eprintf("Unknown analysis '%d'\n",analysis);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
  }
  if (times) {
    dl_stop_timer(&op_tmr);
  }
  if (times) {
    /* NOTE(review): io_tmr was already stopped above and never restarted;
     * this second stop looks redundant -- verify dl_stop_timer semantics. */
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }
  /* common cleanup for all exit paths */
  END:
  if (pk) {
    dl_free(pk);
  }
  if (mat) {
    matrix_free(mat);
  }
  if (args) {
    dl_free(args);
  }
  return err;
}
/* The 'permute' command: read a matrix/vector, re-order its rows and/or
 * columns (randomly, by Cuthill-McKee bandwidth reduction, or from
 * permutation files), and write the result.
 * Returns GOOSEBERRY_SUCCESS or a GOOSEBERRY_ERROR_* code. */
static int __permute(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs,i;
  int times, j, err, permutation;
  dim_t prows;
  cmd_arg_t * args = NULL;
  dim_t * rpk = NULL, * cpk = NULL, *rperm = NULL, *cperm = NULL, *order;
  matrix_t * mat = NULL;
  const char * matfile = NULL, * outfile = NULL, * rpf = NULL, * cpf = NULL;
  /* set defaults */
  times = 0;
  permutation = PERMUTE_PERMUTATION_RANDOM;
  /* argv[0] is the program name and argv[1] the command; parse the rest */
  err = cmd_parse_args(argc-2,argv+2,PERMUTE_OPTS,NPERMUTE_OPTS,&args,&nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;
  if (nargs < 2) {
    __command_usage(argv[0],argv[1],PERMUTE_OPTS,NPERMUTE_OPTS,stderr);
    goto END;
  }
  /* process the parsed arguments */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case PERMUTE_OPTION_HELP:
        __command_usage(argv[0],argv[1],PERMUTE_OPTS,NPERMUTE_OPTS,stdout);
        goto END;
        break;
      case PERMUTE_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Too many input files specified\n");
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case PERMUTE_OPTION_OUTFILE:
        outfile = args[i].val.s;
        break;
      case PERMUTE_OPTION_TIME:
        times = 1;
        break;
      case PERMUTE_OPTION_ROWPERM:
        rpf = args[i].val.s;
        break;
      case PERMUTE_OPTION_COLPERM:
        cpf = args[i].val.s;
        break;
      case PERMUTE_OPTION_PERMUTATION:
        permutation = (permute_permutation_t)args[i].val.o;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  /* validate option combinations before doing any I/O */
  if (matfile == NULL) {
    eprintf("You must specify a matrix/vector input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (outfile == NULL) {
    eprintf("You must specify an output matrix/vector file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (permutation == PERMUTE_PERMUTATION_FILE) {
    /* file-based permutation requires at least one permutation file */
    if (rpf == NULL && cpf == NULL) {
      eprintf("You must specify a row permutation and/or a column permutation "
          "to permute from a file.\n");
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
    }
  } else {
    /* permutation files make no sense with generated permutations */
    if (rpf || cpf) {
      eprintf("Input row and column permutation files are only for use with "
          "file based permutations.\n");
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }
  /* read in input files */
  j = __get_file_type(matfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      /* a single row or column is treated as a vector */
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  /* the random mode applies one permutation to both rows and columns, so
   * the matrix must be square */
  if (permutation == PERMUTE_PERMUTATION_RANDOM) {
    if (mat->nrows != mat->ncols) {
      eprintf("Cannot apply a single permutation to columns and rows of a "
          "non-square matrix\n");
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
    }
  }
  /* read in permutation files if provided */
  if (permutation == PERMUTE_PERMUTATION_FILE) {
    if (rpf) {
      prows = mat->nrows;
      err = gooseberry_read_labels(rpf,&prows,&rpk);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      }
    }
    if (cpf) {
      prows = mat->ncols;
      err = gooseberry_read_labels(cpf,&prows,&cpk);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      }
    }
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&op_tmr);
  }
  /* permute the input matrices */
  switch (permutation) {
    case PERMUTE_PERMUTATION_FILE:
      /* counting-sort indices by their label to turn the label vectors
       * into row/column permutations (rdist records the row grouping) */
      if (rpk) {
        rperm = dim_alloc(mat->nrows);
        order = dim_alloc(mat->nrows);
        dim_incset(order,0,1,mat->nrows);
        dd_countingsort_kv(rpk,order,0,mat->nrows,mat->nrows,rperm,
            &mat->rdist);
        dl_free(order);
        dl_free(rpk);
        rpk = NULL;
      }
      if (cpk) {
        cperm = dim_alloc(mat->ncols);
        order = dim_alloc(mat->ncols);
        dim_incset(order,0,1,mat->ncols);
        dd_countingsort_kv(cpk,order,0,mat->ncols,mat->ncols,cperm,NULL);
        dl_free(order);
        dl_free(cpk);
        cpk = NULL;
      }
      break;
    case PERMUTE_PERMUTATION_RANDOM:
      /* one shared random permutation for rows and columns (square only,
       * checked above) */
      rperm = dim_alloc(mat->nrows);
      dim_incset(rperm,0,1,mat->nrows);
      dim_pseudo_shuffle(rperm,mat->nrows/4,mat->nrows);
      cperm = dim_duplicate(rperm,mat->nrows);
      break;
    case PERMUTE_PERMUTATION_ROWRANDOM:
      rperm = dim_alloc(mat->nrows);
      dim_incset(rperm,0,1,mat->nrows);
      dim_pseudo_shuffle(rperm,mat->nrows/4,mat->nrows);
      break;
    case PERMUTE_PERMUTATION_COLRANDOM:
      cperm = dim_alloc(mat->ncols);
      dim_incset(cperm,0,1,mat->ncols);
      dim_pseudo_shuffle(cperm,mat->ncols/4,mat->ncols);
      break;
    case PERMUTE_PERMUTATION_BANDWIDTH:
      /* NOTE(review): the Cuthill-McKee row ordering is duplicated for the
       * columns using nrows entries -- this appears to assume a square
       * matrix, but unlike the RANDOM case no squareness check is done. */
      rperm = dim_alloc(mat->nrows);
      if ((err = permute_cuthillmckee(mat->nrows,mat->ncols,mat->rowptr, \
              mat->rowind,mat->rowval,NULL,0,rperm)) != GOOSEBERRY_SUCCESS) {
        goto END;
      }
      cperm = dim_duplicate(rperm,mat->nrows);
      break;
  }
  matrix_permute(mat,rperm,cperm);
  if (times) {
    dl_stop_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }
  /* save the output */
  /* NOTE(review): unlike the input path, a negative (unknown) file type for
   * the output file is not rejected here -- confirm the write routines
   * handle it. */
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    /* convert representation if the output format disagrees with it */
    if (mat->type != MATRIX_TYPE_DENSE_VECTOR &&
        mat->type != MATRIX_TYPE_DENSE_MATRIX) {
      matrix_densify(mat);
    }
    err = gooseberry_write_dense_matrix(j,outfile,mat->nrows,mat->ncols,
        mat->rowval);
  } else {
    if (mat->type != MATRIX_TYPE_SPARSE_VECTOR &&
        mat->type != MATRIX_TYPE_SPARSE_MATRIX) {
      matrix_sparsify(mat);
    }
    err = gooseberry_write_sparse_matrix(j,outfile,mat->nrows,mat->ncols,
        mat->rowptr,mat->rowind,mat->rowval);
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }
  /* common cleanup for all exit paths */
  END:
  if (mat) {
    matrix_free(mat);
  }
  if (rperm) {
    dl_free(rperm);
  }
  if (rpk) {
    dl_free(rpk);
  }
  if (cperm) {
    dl_free(cperm);
  }
  if (cpk) {
    dl_free(cpk);
  }
  if (args) {
    dl_free(args);
  }
  return err;
}
/* The 'transform' command: read a matrix/vector, apply a structural
 * transformation (convert, symmetrify, debipartify, transpose, row/column
 * split), and write the result.  Split operations write one output file per
 * part, named "<outfile>.<p>".
 * Returns GOOSEBERRY_SUCCESS or a GOOSEBERRY_ERROR_* code. */
static int __transform(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs, i;
  int times, j, err, operation;
  ind_t offset;
  dim_t prows, pcols, nout, p, nparts;
  cmd_arg_t * args = NULL;
  matrix_t * mat = NULL, * out = NULL;
  dim_t * dist = NULL, * map = NULL;
  char * sfile;
  char const * matfile = NULL, * outfile = NULL, * partfile = NULL;
  /* set defaults */
  /* NOTE(review): nparts has no corresponding command-line option, so it is
   * always 0 and a part file is effectively mandatory for splits. */
  nparts = 0;
  nout = 0;
  times = 0;
  pcols = prows = 0;
  operation = -1;
  /* argv[0] is the program name and argv[1] the command; parse the rest */
  err = cmd_parse_args(argc-2,argv+2,TRANSFORM_OPTS,NTRANSFORM_OPTS,&args, \
      &nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;
  if (nargs < 2) {
    __command_usage(argv[0],argv[1],TRANSFORM_OPTS,NTRANSFORM_OPTS,stderr);
    goto END;
  }
  /* process the parsed arguments */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case TRANSFORM_OPTION_HELP:
        __command_usage(argv[0],argv[1],TRANSFORM_OPTS,NTRANSFORM_OPTS,stdout);
        goto END;
        break;
      case TRANSFORM_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Extra input file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case TRANSFORM_OPTION_OUTFILE:
        if (outfile == NULL) {
          outfile = args[i].val.s;
        } else {
          eprintf("Extra output file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case TRANSFORM_OPTION_TIME:
        times = 1;
        break;
      case TRANSFORM_OPTION_OPERATION:
        operation = (transform_operation_t)args[i].val.o;
        break;
      case TRANSFORM_OPTION_PARTFILE:
        if (partfile == NULL) {
          partfile = args[i].val.s;
        } else {
          eprintf("Extra part file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  /* validate option combinations before doing any I/O */
  if (matfile == NULL) {
    eprintf("You must specify a matrix/vector input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (outfile == NULL) {
    eprintf("You must specify an output matrix/vector file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if ((operation == TRANSFORM_OPERATION_ROWSPLIT || \
        operation == TRANSFORM_OPERATION_COLSPLIT) && \
      (partfile == NULL && nparts == 0)) {
    eprintf("You must specify a part file for splitting on or a number of "
        "partitions.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }
  /* read in input file */
  j = __get_file_type(matfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      /* a single row or column is treated as a vector */
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows), \
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  /* for split operations, read the partition file; its row count must match
   * the dimension being split.  NOTE(review): 'dist' and 'map' are never
   * freed at END -- possible leak. */
  if (operation == TRANSFORM_OPERATION_ROWSPLIT) {
    if (partfile != NULL) {
      err = gooseberry_read_partition(partfile,&prows,&nout,&map,NULL,&dist);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      } else if (prows != mat->nrows) {
        eprintf("Invalid number of rows in partition file: found '"PF_DIM_T"' "
            "but matrix has '"PF_DIM_T"'\n",prows,mat->nrows);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
      }
    } else {
      dl_error("Unimplemented\n");
    }
  } else if (operation == TRANSFORM_OPERATION_COLSPLIT) {
    if (partfile) {
      err = gooseberry_read_partition(partfile,&pcols,&nout,&map,NULL,&dist);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      } else if (pcols != mat->ncols) {
        eprintf("Invalid number of columns in partition file: found '"PF_DIM_T
            "' but matrix has '"PF_DIM_T"'\n",pcols,mat->ncols);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
      }
    } else {
      dl_error("Unimplemented\n");
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&op_tmr);
  }
  /* perform the requested transformation into 'out' */
  out = matrix_calloc(1);
  out->type = MATRIX_TYPE_SPARSE_MATRIX;
  switch (operation) {
    case TRANSFORM_OPERATION_CONVERT:
      out->nrows = mat->nrows;
      out->ncols = mat->ncols;
      if (mat->type == out->type) {
        /* same representation: deep-copy the CSR arrays */
        out->rowptr = ind_duplicate(mat->rowptr,mat->nrows+1);
        out->rowind = dim_duplicate(mat->rowind,mat->rowptr[mat->nrows]);
        out->rowval = real_duplicate(mat->rowval,mat->rowptr[mat->nrows]);
      } else {
        dl_error("Not finished yet");
      }
      break;
    case TRANSFORM_OPERATION_SYMMETRIFY:
      /* B = A + A^T; the result is square on the larger dimension */
      out->ncols = out->nrows = dl_max(mat->nrows,mat->ncols);
      out->type = MATRIX_TYPE_SPARSE_MATRIX;
      err = gooseberry_symmetrify_sparse(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_DEBIPARTIFY:
      /* B = [0, A; A^T, 0]; square of size nrows+ncols */
      out->ncols = out->nrows = mat->nrows+mat->ncols;
      out->type = MATRIX_TYPE_SPARSE_MATRIX;
      err = gooseberry_debipartify_sparse(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_TRANSPOSE:
      out->ncols = mat->nrows;
      out->nrows = mat->ncols;
      err = gooseberry_transpose_sparse(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_ROWSPLIT:
      out->ncols = mat->ncols;
      out->nrows = mat->nrows;
      gooseberry_rowsplit_sparse(mat->nrows,mat->ncols,mat->rowptr,mat->rowind,
          mat->rowval,nout,dist,map,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_COLSPLIT:
      /* width of the widest part becomes the output column count */
      out->ncols = 0;
      for (p=0;p<nout;++p) {
        if (dist[p+1] - dist[p] > out->ncols) {
          out->ncols = dist[p+1] - dist[p];
        }
      }
      out->nrows = mat->nrows;
      gooseberry_colsplit_sparse(mat->nrows,mat->ncols,mat->rowptr,mat->rowind,
          mat->rowval,nout,dist,map,&out->rowptr,&out->rowind,&out->rowval);
      break;
    default:
      eprintf("Unknown transform operation: '%d'\n",operation);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      break;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_stop_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }
  j = __get_file_type(outfile);
  if (operation == TRANSFORM_OPERATION_COLSPLIT ||
      operation == TRANSFORM_OPERATION_ROWSPLIT) {
    /* splits write one file per part: "<outfile>.<p>" */
    /* long enough to append a 64 bit number and null character */
    /* NOTE(review): malloc result is not checked before use */
    sfile = malloc(strlen(outfile)+22);
    for (p=0;p<nout;++p) {
      sprintf(sfile,"%s."PF_DIM_T,outfile,p);
      /* compute this part's dimensions and its offset into 'out' */
      if (operation == TRANSFORM_OPERATION_COLSPLIT) {
        prows = mat->nrows;
        pcols = dist[p+1] - dist[p];
        offset = p*mat->nrows;
      } else {
        prows = dist[p+1] - dist[p];
        pcols = mat->ncols;
        offset = dist[p];
      }
      if (__is_dense(j)) {
        offset = mat->nrows*mat->ncols*p;
        if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
            out->type != MATRIX_TYPE_DENSE_MATRIX) {
          matrix_densify(out);
        }
        err = gooseberry_write_dense_matrix(j,sfile,prows,pcols,
            out->rowval+offset);
      } else {
        if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
            out->type != MATRIX_TYPE_SPARSE_MATRIX) {
          matrix_sparsify(out);
        }
        err = gooseberry_write_sparse_matrix(j,sfile,prows,pcols,
            out->rowptr+offset,out->rowind,out->rowval);
      }
    }
    dl_free(sfile);
  } else {
    /* save the output */
    if (__is_dense(j)) {
      if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
          out->type != MATRIX_TYPE_DENSE_MATRIX) {
        matrix_densify(out);
      }
      err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
          out->rowval);
    } else {
      if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
          out->type != MATRIX_TYPE_SPARSE_MATRIX) {
        matrix_sparsify(out);
      }
      err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
          out->rowptr,out->rowind,out->rowval);
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }
  /* common cleanup for all exit paths */
  END:
  if (mat) {
    matrix_free(mat);
  }
  if (out) {
    matrix_free(out);
  }
  if (args) {
    dl_free(args);
  }
  return err;
}
static int __generate(
int argc,
char ** argv)
{
dl_timer_t io_tmr, op_tmr;
size_t nargs, i;
int times, j, err, type;
dim_t size;
cmd_arg_t * args = NULL;
matrix_t * out = NULL;
const char * outfile = NULL;
/* set defaults */
size = 0;
times = 0;
type = GENERATE_TYPE_NULL;
err = cmd_parse_args(argc-2,argv+2,GENERATE_OPTS,NGENERATE_OPTS,&args,
&nargs);
if (err != DL_CMDLINE_SUCCESS) {
return GOOSEBERRY_ERROR_INVALIDINPUT;
}
err = GOOSEBERRY_SUCCESS;
if (nargs < 2) {
__command_usage(argv[0],argv[1],GENERATE_OPTS,NGENERATE_OPTS,stderr);
goto END;
}
for (i=0;i<nargs;++i) {
switch (args[i].id) {
case GENERATE_OPTION_HELP:
__command_usage(argv[0],argv[1],GENERATE_OPTS,NGENERATE_OPTS,stdout);
goto END;
break;
case GENERATE_OPTION_OUTFILE:
if (outfile == NULL) {
outfile = args[i].val.s;
} else {
eprintf("Extra output file specified: '%s'\n",args[i].val.s);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
break;
case GENERATE_OPTION_TIME:
times = 1;
break;
case GENERATE_OPTION_TYPE:
type = (generate_type_t)args[i].val.o;
break;
case GENERATE_OPTION_SIZE:
size = (dim_t)args[i].val.i;
break;
default:
eprintf("Unknown argument '%s'\n",args[i].val.s);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
}
if (size == 0) {
eprintf("You must specify a size greater than zero.\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (type == GENERATE_TYPE_NULL) {
eprintf("You must specify a type to generate.\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (outfile == NULL) {
eprintf("You must specify an output matrix/vector file.\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_init_timer(&io_tmr);
dl_init_timer(&op_tmr);
dl_start_timer(&op_tmr);
}
out = matrix_calloc(1);
switch (type) {
case GENERATE_TYPE_DENSE_VECTOR:
out->type = MATRIX_TYPE_DENSE_VECTOR;
out->nrows = size;
out->ncols = 1;
out->rowval = real_alloc(out->nrows);
real_fill_rand(-1.0,1.0,out->rowval,out->nrows);
break;
default:
eprintf("Unknown generate type: '%d'\n",type);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
if (times) {
dl_stop_timer(&op_tmr);
dl_start_timer(&io_tmr);
}
j = __get_file_type(outfile);
if (__is_dense(j)) {
if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
out->type != MATRIX_TYPE_DENSE_MATRIX) {
matrix_densify(out);
}
err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
out->rowval);
} else {
if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
out->type != MATRIX_TYPE_SPARSE_MATRIX) {
matrix_sparsify(out);
}
err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
out->rowptr,out->rowind,out->rowval);
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_stop_timer(&io_tmr);
dl_print_header("Times",'#');
printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
dl_print_footer('#');
}
END:
if (out) {
matrix_free(out);
}
if (args) {
dl_free(args);
}
return err;
}
/**
 * Command handler for the "blas" subcommand: read one or more matrices,
 * optionally permute them, perform the requested BLAS-style operation
 * (currently multiplication) `runs` times, and write the result.
 *
 * @param argc Full program argument count (argv[0]=program, argv[1]=command).
 * @param argv Full program argument vector; options start at argv[2].
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __blas(
int argc,
char ** argv)
{
dl_timer_t io_tmr, op_tmr, aux_tmr;
size_t nargs, runs, r, i, ninfiles = 0,nthreads;
int times, j, err, oper, redband;
dim_t outrows, outcols, prows;
cmd_arg_t * args = NULL;
dim_t *rperm = NULL, *cperm = NULL, *bperm = NULL;
/* NOTE(review): in[] entries are not NULL-initialized; the cleanup loop at
 * END reads in[i] for i < ninfiles, which is only safe because each in[i]
 * is assigned before any goto END that follows its infiles[] entry being
 * counted. Fragile -- confirm before reordering the read loop. */
matrix_t * in[256], * out = NULL;
/* NOTE(review): infiles[] is a fixed 256-entry array filled without a bounds
 * check below; more than 256 -i arguments would overflow it. */
const char * infiles[256], * outfile = NULL, * rpf = NULL, * cpf = NULL;
/* set defaults */
redband = 0;
times = 0;
runs = 1;
oper = BLAS_OPERATION_NOOP;
/* skip program name and command name when parsing options */
err = cmd_parse_args(argc-2,argv+2,BLAS_OPTS,NBLAS_OPTS,&args,&nargs);
if (err != DL_CMDLINE_SUCCESS) {
return GOOSEBERRY_ERROR_INVALIDINPUT;
}
err = GOOSEBERRY_SUCCESS;
nthreads = DEFAULT_NUMTHREADS;
if (nargs < 2) {
__command_usage(argv[0],argv[1],BLAS_OPTS,NBLAS_OPTS,stderr);
goto END;
}
/* process parsed command-line options */
for (i=0;i<nargs;++i) {
switch (args[i].id) {
case BLAS_OPTION_HELP:
__command_usage(argv[0],argv[1],BLAS_OPTS,NBLAS_OPTS,stdout);
goto END;
break;
case BLAS_OPTION_OPERATION:
oper = (blas_operation_t)args[i].val.o;
break;
case BLAS_OPTION_INFILE:
infiles[ninfiles++] = args[i].val.s;
break;
case BLAS_OPTION_OUTFILE:
outfile = args[i].val.s;
break;
case BLAS_OPTION_TIME:
times = 1;
break;
case BLAS_OPTION_RUNS:
runs = (size_t)args[i].val.i;
break;
#ifndef NO_OMP
case BLAS_OPTION_THREADS:
nthreads = (size_t)args[i].val.i;
omp_set_num_threads(nthreads);
break;
#endif
case BLAS_OPTION_ROWPERM:
rpf = args[i].val.s;
break;
case BLAS_OPTION_COLPERM:
cpf = args[i].val.s;
break;
case BLAS_OPTION_REDUCEBANDWIDTH:
redband = 1;
break;
default:
eprintf("Unknown argument '%s'\n",args[i].val.s);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_init_timer(&io_tmr);
dl_init_timer(&op_tmr);
dl_init_timer(&aux_tmr);
dl_start_timer(&io_tmr);
}
/* read in input files */
for (i=0;i<ninfiles;++i) {
j = __get_file_type(infiles[i]);
if (j < 0) {
eprintf("Unknown file format of '%s'\n",infiles[i]);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
} else {
/* read in the matrix/vector */
in[i] = matrix_calloc(1);
if (__is_dense(j)) {
err = gooseberry_read_dense_matrix(j,infiles[i],&(in[i]->nrows),
&(in[i]->ncols),&(in[i]->rowval));
/* a single row or column is classified as a vector */
if (in[i]->ncols == 1 || in[i]->nrows == 1) {
in[i]->type = MATRIX_TYPE_DENSE_VECTOR;
} else {
in[i]->type = MATRIX_TYPE_DENSE_MATRIX;
}
} else {
err = gooseberry_read_sparse_matrix(j,infiles[i],&(in[i]->nrows),
&(in[i]->ncols),&(in[i]->rowptr),&(in[i]->rowind),
&(in[i]->rowval));
if (in[i]->ncols == 1 || in[i]->nrows == 1) {
in[i]->type = MATRIX_TYPE_SPARSE_VECTOR;
} else {
in[i]->type = MATRIX_TYPE_SPARSE_MATRIX;
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
}
/* read in permutation files if provided */
if (rpf) {
prows = in[0]->nrows;
err = gooseberry_read_partition(rpf,&prows,&in[0]->nrdist,NULL,&rperm,
&in[0]->rdist);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
printf("Read "PF_DIM_T"-way row partition\n",in[0]->nrdist);
} else {
/* no row partition file: treat the matrix as a single distribution */
in[0]->nrdist = 1;
}
if (cpf) {
prows = in[0]->ncols;
err = gooseberry_read_partition(cpf,&prows,&in[0]->ncdist,NULL,&cperm,
&in[0]->cdist);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
printf("Read "PF_DIM_T"-way column partition\n",in[0]->ncdist);
}
if (times) {
dl_stop_timer(&io_tmr);
dl_start_timer(&aux_tmr);
}
/* NOTE(review): in[1] is used unconditionally here and below, but nothing
 * guarantees ninfiles >= 2; with a single input file this reads an
 * uninitialized pointer. Presumably matrix_permute(NULL,...) is tolerated
 * only if in[1] happens to be NULL -- TODO confirm and add a guard. */
matrix_permute(in[0],rperm,cperm);
matrix_permute(in[1],cperm,NULL);
if (redband) {
/* reorder in[0] (and in[1]'s rows) by Cuthill-McKee to reduce bandwidth */
bperm = dim_alloc(in[0]->nrows);
if ((err = permute_cuthillmckee(in[0]->nrows,in[0]->ncols,in[0]->rowptr,
in[0]->rowind,in[0]->rowval,in[0]->rdist,in[0]->nrdist,bperm))
!= GOOSEBERRY_SUCCESS) {
goto END;
}
/* compose the bandwidth permutation with the user-supplied ones so the
 * final unpermute undoes both */
if (rperm && cperm) {
for (i=0;i<in[0]->nrows;++i) {
rperm[i] = bperm[rperm[i]];
}
/* NOTE(review): this loop bounds cperm updates by nrows, not ncols --
 * looks wrong for rectangular matrices; verify. */
for (i=0;i<in[0]->nrows;++i) {
cperm[i] = bperm[cperm[i]];
}
}
matrix_permute(in[0],bperm,bperm);
matrix_permute(in[1],bperm,NULL);
dl_free(bperm);
}
/* allocate the output matrix */
outrows = in[0]->nrows;
outcols = in[ninfiles-1]->ncols;
out = matrix_alloc(1);
/* the output storage type (dense/sparse) follows the output file format */
j = __get_file_type(outfile);
if (__is_dense(j)) {
if (outrows == 1|| outcols == 1) {
matrix_init(MATRIX_TYPE_DENSE_VECTOR,outrows,outcols,0,out);
} else {
matrix_init(MATRIX_TYPE_DENSE_MATRIX,outrows,outcols,0,out);
}
} else {
if (outrows == 1|| outcols == 1) {
matrix_init(MATRIX_TYPE_SPARSE_VECTOR,outrows,outcols,NULL_IND,out);
} else {
matrix_init(MATRIX_TYPE_SPARSE_MATRIX,outrows,outcols,NULL_IND,out);
}
}
if (times) {
dl_stop_timer(&aux_tmr);
dl_start_timer(&op_tmr);
}
/* run the operation `runs` times (for benchmarking) */
for (r=0;r<runs;++r) {
/* perform operation */
switch (oper) {
case BLAS_OPERATION_MULTIPLY:
/* NOTE(review): only rejects ncols > nrows, not ncols != nrows;
 * a strict mismatch check may be intended here. */
if (in[0]->ncols > in[1]->nrows) {
eprintf("Matrix dimensions do not match for multiplication: "
PF_DIM_T"x"PF_DIM_T" and "PF_DIM_T"x"PF_DIM_T"\n",in[0]->nrows,
in[0]->ncols,in[1]->nrows,in[1]->ncols);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
}
/* dispatch on the (lhs type, rhs type, output type) combination */
switch(in[0]->type) {
case MATRIX_TYPE_SPARSE_VECTOR:
case MATRIX_TYPE_SPARSE_MATRIX:
switch (in[1]->type) {
case MATRIX_TYPE_DENSE_VECTOR:
case MATRIX_TYPE_DENSE_MATRIX:
/* sparse x dense: needs a column-major index of the rhs */
matrix_buildindex(in[1]);
if ((err = blas_spmult(in[0]->nrows,in[0]->ncols,in[1]->ncols,
in[0]->rowptr,in[0]->rowind,in[0]->rowval,
in[1]->colval,out->rowval,in[0]->rdist,in[0]->nrdist))
!= GOOSEBERRY_SUCCESS) {
goto END;
}
break;
case MATRIX_TYPE_SPARSE_VECTOR:
case MATRIX_TYPE_SPARSE_MATRIX:
switch (out->type) {
case MATRIX_TYPE_DENSE_VECTOR:
case MATRIX_TYPE_DENSE_MATRIX:
/* sparse x sparse -> dense output */
if ((err = blas_spmultsp(in[0]->nrows,in[0]->ncols,
in[1]->ncols,in[0]->rowptr,in[0]->rowind,
in[0]->rowval,in[1]->rowptr,in[1]->rowind,
in[1]->rowval,out->rowval,in[0]->rdist,
in[0]->nrdist)) != GOOSEBERRY_SUCCESS) {
goto END;
}
break;
case MATRIX_TYPE_SPARSE_VECTOR:
case MATRIX_TYPE_SPARSE_MATRIX:
/* sparse x sparse -> sparse output (structure allocated by callee) */
if ((err = blas_spmultsp_sp(in[0]->nrows,in[0]->ncols,
in[1]->ncols,in[0]->rowptr,in[0]->rowind,
in[0]->rowval,in[1]->rowptr,in[1]->rowind,
in[1]->rowval,&out->rowptr,&out->rowind,
&out->rowval,in[0]->rdist,in[0]->nrdist)) !=
GOOSEBERRY_SUCCESS) {
goto END;
}
break;
default:
eprintf("Unsupported output matrix type: %d\n",out->type);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
break;
default:
eprintf("Unsupported matrix combinations\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
break;
case MATRIX_TYPE_DENSE_VECTOR:
case MATRIX_TYPE_DENSE_MATRIX:
switch (in[1]->type) {
case MATRIX_TYPE_DENSE_VECTOR:
case MATRIX_TYPE_DENSE_MATRIX:
/* dense x dense */
matrix_buildindex(in[1]);
if ((err = blas_mult(in[0]->nrows,in[0]->ncols,in[1]->ncols,
in[0]->rowval,in[1]->colval,out->rowval,in[0]->rdist,
in[0]->nrdist)) != GOOSEBERRY_SUCCESS) {
goto END;
}
break;
case MATRIX_TYPE_SPARSE_VECTOR:
case MATRIX_TYPE_SPARSE_MATRIX:
eprintf("The operation mmsp is unsupported at the moment.\n");
err = GOOSEBERRY_ERROR_UNIMPLEMENTED;
break;
default:
eprintf("Unsupported matrix combinations\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
break;
default:
eprintf("Unknown matrix type '%d'\n",in[0]->type);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
break;
default:
eprintf("Unknown operation '%d'\n",oper);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
}
if (times) {
dl_stop_timer(&op_tmr);
dl_start_timer(&aux_tmr);
}
/* undo the row permutation on the result before writing it out */
if (rperm) {
matrix_unpermute(out,rperm,NULL);
}
if (times) {
dl_stop_timer(&aux_tmr);
dl_start_timer(&io_tmr);
}
/* save the output */
j = __get_file_type(outfile);
if (__is_dense(j)) {
err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
out->rowval);
} else {
err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
out->rowptr,out->rowind,out->rowval);
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_stop_timer(&io_tmr);
dl_print_header("Times",'#');
printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
printf(" Auxillary: %0.04lf\n",dl_poll_timer(&aux_tmr));
printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
dl_print_footer('#');
}
/* unified cleanup path for both success and error exits */
END:
if (out) {
matrix_free(out);
}
if (rperm) {
dl_free(rperm);
}
if (cperm) {
dl_free(cperm);
}
for (i=0;i<ninfiles;++i) {
if (in[i]) {
matrix_free(in[i]);
in[i] = NULL;
}
}
if (args) {
dl_free(args);
}
return err;
}
/**
 * Command handler for the "cgd" subcommand: solve a linear system with
 * conjugate gradient descent. Reads a matrix and a right-hand-side vector,
 * optionally permutes them by supplied row/column labelings, runs cgd()
 * `runs` times, and writes the solution vector as a dense grid file.
 *
 * @param argc Full program argument count (argv[0]=program, argv[1]=command).
 * @param argv Full program argument vector; options start at argv[2].
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __cgd(
int argc,
char ** argv)
{
dl_timer_t io_tmr, op_tmr;
real_t error = 0;
size_t nargs, runs, r, i, niter = 0;
int times, j, err;
dim_t outrows, outcols, prows;
cmd_arg_t * args = NULL;
dim_t * rpk = NULL, * cpk = NULL, *rperm = NULL, *cperm = NULL, *order;
matrix_t * mat = NULL, * vec = NULL, * out = NULL;
const char * matfile = NULL, * vecfile = NULL, * outfile = NULL,
* rpf = NULL, * cpf = NULL;
/* set defaults */
times = 0;
runs = 1;
err = cmd_parse_args(argc-2,argv+2,CGD_OPTS,NCGD_OPTS,&args,&nargs);
if (err != DL_CMDLINE_SUCCESS) {
return GOOSEBERRY_ERROR_INVALIDINPUT;
}
err = GOOSEBERRY_SUCCESS;
if (nargs < 2) {
__command_usage(argv[0],argv[1],CGD_OPTS,NCGD_OPTS,stderr);
goto END;
}
/* process parsed command-line options */
for (i=0;i<nargs;++i) {
switch (args[i].id) {
case CGD_OPTION_HELP:
__command_usage(argv[0],argv[1],CGD_OPTS,NCGD_OPTS,stdout);
goto END;
break;
case CGD_OPTION_ERROR:
error = (real_t)args[i].val.f;
break;
case CGD_OPTION_NITER:
niter = (size_t)args[i].val.i;
break;
case CGD_OPTION_INFILE:
/* first -i is the matrix, second is the vector */
if (matfile == NULL) {
matfile = args[i].val.s;
} else if (vecfile == NULL) {
vecfile = args[i].val.s;
} else {
eprintf("Too many input files specified\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
break;
case CGD_OPTION_OUTFILE:
outfile = args[i].val.s;
break;
case CGD_OPTION_TIME:
times = 1;
break;
case CGD_OPTION_RUNS:
runs = (size_t)args[i].val.i;
break;
#ifndef NO_OMP
case CGD_OPTION_THREADS:
omp_set_num_threads(args[i].val.i);
break;
#endif
case CGD_OPTION_ROWPERM:
rpf = args[i].val.s;
break;
case CGD_OPTION_COLPERM:
cpf = args[i].val.s;
break;
default:
eprintf("Unknown argument '%s'\n",args[i].val.s);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
}
if (matfile == NULL || vecfile == NULL) {
eprintf("You must specify both a matrix input file and a vector input "
"file (in that order).\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_init_timer(&io_tmr);
dl_init_timer(&op_tmr);
dl_start_timer(&io_tmr);
}
/* read in input files */
j = __get_file_type(matfile);
if (j < 0) {
eprintf("Unknown file format of '%s'\n",matfile);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
} else {
/* read in the matrix */
mat = matrix_calloc(1);
if (__is_dense(j)) {
err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
&(mat->ncols),&(mat->rowval));
/* a single row or column is classified as a vector */
if (mat->ncols == 1 || mat->nrows == 1) {
mat->type = MATRIX_TYPE_DENSE_VECTOR;
} else {
mat->type = MATRIX_TYPE_DENSE_MATRIX;
}
} else {
err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
&(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
if (mat->ncols == 1 || mat->nrows == 1) {
mat->type = MATRIX_TYPE_SPARSE_VECTOR;
} else {
mat->type = MATRIX_TYPE_SPARSE_MATRIX;
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
j = __get_file_type(vecfile);
if (j < 0) {
eprintf("Unknown file format of '%s'\n",vecfile);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
} else {
/* read in the vector */
vec = matrix_calloc(1);
if (__is_dense(j)) {
err = gooseberry_read_dense_matrix(j,vecfile,&(vec->nrows),
&(vec->ncols),&(vec->rowval));
if (vec->ncols == 1 || vec->nrows == 1) {
vec->type = MATRIX_TYPE_DENSE_VECTOR;
} else {
vec->type = MATRIX_TYPE_DENSE_MATRIX;
}
} else {
err = gooseberry_read_sparse_matrix(j,vecfile,&(vec->nrows),
&(vec->ncols),&(vec->rowptr),&(vec->rowind),&(vec->rowval));
if (vec->ncols == 1 || vec->nrows == 1) {
vec->type = MATRIX_TYPE_SPARSE_VECTOR;
} else {
vec->type = MATRIX_TYPE_SPARSE_MATRIX;
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
printf("matrix = "PF_DIM_T"x"PF_DIM_T" vector = "PF_DIM_T"x"PF_DIM_T"\n",
mat->nrows,mat->ncols,vec->nrows,vec->ncols);
/* read in permutation files if provided */
if (rpf) {
prows = mat->nrows;
err = gooseberry_read_labels(rpf,&prows,&rpk);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (cpf) {
prows = mat->ncols;
err = gooseberry_read_labels(cpf,&prows,&cpk);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (times) {
dl_stop_timer(&io_tmr);
}
/* permute the input matrices */
if (rpk) {
/* turn the row labeling into a permutation by counting-sorting the
 * identity ordering on the label keys */
rperm = dim_alloc(mat->nrows);
order = dim_alloc(mat->nrows);
dim_incset(order,0,1,mat->nrows);
dd_countingsort_kv(rpk,order,0,mat->nrows,mat->nrows,rperm,
&mat->rdist);
dl_free(order);
dl_free(rpk);
rpk = NULL;
}
if (cpk) {
/* same for the column labeling */
cperm = dim_alloc(mat->ncols);
order = dim_alloc(mat->ncols);
dim_incset(order,0,1,mat->ncols);
dd_countingsort_kv(cpk,order,0,mat->ncols,mat->ncols,cperm,NULL);
dl_free(order);
dl_free(cpk);
cpk = NULL;
}
matrix_permute(mat,rperm,cperm);
matrix_permute(vec,rperm,NULL);
/* allocate the output matrix */
/* solution vector has one entry per matrix column */
outrows = mat->ncols;
outcols = 1;
out = matrix_alloc(1);
j = __get_file_type(outfile);
if (__is_dense(j)) {
if (outrows == 1|| outcols == 1) {
matrix_init(MATRIX_TYPE_DENSE_VECTOR,outrows,outcols,0,out);
} else {
matrix_init(MATRIX_TYPE_DENSE_MATRIX,outrows,outcols,0,out);
}
} else {
if (outrows == 1|| outcols == 1) {
matrix_init(MATRIX_TYPE_SPARSE_VECTOR,outrows,outcols,NULL_IND,out);
} else {
matrix_init(MATRIX_TYPE_SPARSE_MATRIX,outrows,outcols,NULL_IND,out);
}
}
if (times) {
dl_start_timer(&op_tmr);
}
/* run the solver `runs` times (for benchmarking) */
for (r=0;r<runs;++r) {
/* perform operation */
if ((err = cgd(mat,vec,out,error,niter)) != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (times) {
dl_stop_timer(&op_tmr);
}
/* the solution is in column space, so undo the column permutation */
if (cperm) {
matrix_unpermute(out,cperm,NULL);
}
if (times) {
dl_start_timer(&io_tmr);
}
/* save the output */
/* NOTE(review): always written as a dense grid regardless of the output
 * file's extension, unlike __blas -- confirm this is intentional. */
err = gooseberry_write_dense_matrix(GOOSEBERRY_FORMAT_GRID,outfile,
out->nrows,out->ncols,out->rowval);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_stop_timer(&io_tmr);
dl_print_header("Times",'#');
printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
dl_print_footer('#');
}
/* unified cleanup path for both success and error exits */
END:
if (mat) {
matrix_free(mat);
}
if (vec) {
matrix_free(vec);
}
if (out) {
matrix_free(out);
}
if (rperm) {
dl_free(rperm);
}
if (rpk) {
dl_free(rpk);
}
if (cperm) {
dl_free(cperm);
}
if (cpk) {
dl_free(cpk);
}
if (args) {
dl_free(args);
}
return err;
}
/**
 * Command handler for the "sgd" subcommand (stochastic gradient descent).
 *
 * Currently a stub: the entire implementation is compiled out behind
 * `#ifdef XXX` and the function always returns GOOSEBERRY_SUCCESS.
 *
 * NOTE(review): the disabled body does not compile as written if XXX is
 * defined -- it redeclares `err` (already defined above the #ifdef), and it
 * references `vecfile` and `outfile`, which are not declared here (only
 * `ufile`/`vfile` are). It also still calls cgd() rather than an SGD
 * routine. Treat it as a template to be finished, not as dormant working
 * code.
 *
 * @param argc Full program argument count (unused in the stub).
 * @param argv Full program argument vector (unused in the stub).
 *
 * @return GOOSEBERRY_SUCCESS (always, while the body is disabled).
 */
static int __sgd(
int argc,
char ** argv)
{
int err = GOOSEBERRY_SUCCESS;
#ifdef XXX
int times, j, err;
dl_timer_t io_tmr, op_tmr;
real_t error = 0;
size_t nargs, runs, r, i, niter = 0;
dim_t outrows, outcols, prows;
cmd_arg_t * args = NULL;
dim_t * rpk = NULL, * cpk = NULL, *rperm = NULL, *cperm = NULL, *order;
matrix_t * mat = NULL, * vec = NULL, * out = NULL;
const char * matfile = NULL, * ufile = NULL, * vfile, * rpf = NULL,
* cpf = NULL;
/* set defaults */
times = 0;
runs = 1;
err = cmd_parse_args(argc-2,argv+2,SGD_OPTS,NSGD_OPTS,&args,&nargs);
if (err != DL_CMDLINE_SUCCESS) {
return GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (nargs < 2) {
__command_usage(argv[0],argv[1],SGD_OPTS,NSGD_OPTS,stderr);
goto END;
}
/* process parsed command-line options */
for (i=0;i<nargs;++i) {
switch (args[i].id) {
case SGD_OPTION_HELP:
__command_usage(argv[0],argv[1],SGD_OPTS,NSGD_OPTS,stdout);
goto END;
break;
case SGD_OPTION_ERROR:
error = (real_t)args[i].val.f;
break;
case SGD_OPTION_NITER:
niter = (size_t)args[i].val.i;
break;
case SGD_OPTION_INFILE:
if (matfile == NULL) {
matfile = args[i].val.s;
} else if (vecfile == NULL) {
vecfile = args[i].val.s;
} else {
eprintf("Too many input files specified\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
break;
case SGD_OPTION_OUTFILE:
outfile = args[i].val.s;
break;
case SGD_OPTION_TIME:
times = 1;
break;
case SGD_OPTION_RUNS:
runs = (size_t)args[i].val.i;
break;
#ifndef NO_OMP
case SGD_OPTION_THREADS:
omp_set_num_threads(args[i].val.i);
break;
#endif
case SGD_OPTION_ROWPERM:
rpf = args[i].val.s;
break;
case SGD_OPTION_COLPERM:
cpf = args[i].val.s;
break;
default:
eprintf("Unknown argument '%s'\n",args[i].val.s);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
}
if (matfile == NULL || vecfile == NULL) {
eprintf("You must specify both a matrix input file and a vector input "
"file (in that order).\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_init_timer(&io_tmr);
dl_init_timer(&op_tmr);
dl_start_timer(&io_tmr);
}
/* read in input files */
j = __get_file_type(matfile);
if (j < 0) {
eprintf("Unknown file format of '%s'\n",matfile);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
} else {
/* read in the matrix */
mat = matrix_calloc(1);
if (__is_dense(j)) {
err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
&(mat->ncols),&(mat->rowval));
if (mat->ncols == 1 || mat->nrows == 1) {
mat->type = MATRIX_TYPE_DENSE_VECTOR;
} else {
mat->type = MATRIX_TYPE_DENSE_MATRIX;
}
} else {
err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
&(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
if (mat->ncols == 1 || mat->nrows == 1) {
mat->type = MATRIX_TYPE_SPARSE_VECTOR;
} else {
mat->type = MATRIX_TYPE_SPARSE_MATRIX;
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
j = __get_file_type(vecfile);
if (j < 0) {
eprintf("Unknown file format of '%s'\n",vecfile);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
} else {
/* read in the vector */
vec = matrix_calloc(1);
if (__is_dense(j)) {
err = gooseberry_read_dense_matrix(j,vecfile,&(vec->nrows),
&(vec->ncols),&(vec->rowval));
if (vec->ncols == 1 || vec->nrows == 1) {
vec->type = MATRIX_TYPE_DENSE_VECTOR;
} else {
vec->type = MATRIX_TYPE_DENSE_MATRIX;
}
} else {
err = gooseberry_read_sparse_matrix(j,vecfile,&(vec->nrows),
&(vec->ncols),&(vec->rowptr),&(vec->rowind),&(vec->rowval));
if (vec->ncols == 1 || vec->nrows == 1) {
vec->type = MATRIX_TYPE_SPARSE_VECTOR;
} else {
vec->type = MATRIX_TYPE_SPARSE_MATRIX;
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
printf("matrix = "PF_DIM_T"x"PF_DIM_T" vector = "PF_DIM_T"x"PF_DIM_T"\n",
mat->nrows,mat->ncols,vec->nrows,vec->ncols);
/* read in permutation files if provided */
if (rpf) {
prows = mat->nrows;
err = gooseberry_read_labels(rpf,&prows,&rpk);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (cpf) {
prows = mat->ncols;
err = gooseberry_read_labels(cpf,&prows,&cpk);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (times) {
dl_stop_timer(&io_tmr);
}
/* permute the input matrices */
if (rpk) {
rperm = dim_alloc(mat->nrows);
order = dim_alloc(mat->nrows);
dim_incset(order,0,1,mat->nrows);
dim_countingsort_v(rpk,order,rperm,0,mat->nrows,mat->nrows);
dl_free(order);
dl_free(rpk);
rpk = NULL;
}
if (cpk) {
cperm = dim_alloc(mat->ncols);
order = dim_alloc(mat->ncols);
dim_incset(order,0,1,mat->ncols);
dim_countingsort_v(cpk,order,cperm,0,mat->ncols,mat->ncols);
dl_free(order);
dl_free(cpk);
cpk = NULL;
}
matrix_permute(mat,rperm,cperm);
matrix_permute(vec,rperm,NULL);
/* allocate the output matrix */
outrows = mat->ncols;
outcols = 1;
out = matrix_alloc(1);
j = __get_file_type(outfile);
if (__is_dense(j)) {
if (outrows == 1|| outcols == 1) {
matrix_init(MATRIX_TYPE_DENSE_VECTOR,outrows,outcols,0,out);
} else {
matrix_init(MATRIX_TYPE_DENSE_MATRIX,outrows,outcols,0,out);
}
} else {
if (outrows == 1|| outcols == 1) {
matrix_init(MATRIX_TYPE_SPARSE_VECTOR,outrows,outcols,NULL_IND,out);
} else {
matrix_init(MATRIX_TYPE_SPARSE_MATRIX,outrows,outcols,NULL_IND,out);
}
}
if (times) {
dl_start_timer(&op_tmr);
}
for (r=0;r<runs;++r) {
/* perform operation */
if ((err = cgd(mat,vec,out,error,niter)) != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (times) {
dl_stop_timer(&op_tmr);
}
if (cperm) {
matrix_unpermute(out,cperm,NULL);
}
if (times) {
dl_start_timer(&io_tmr);
}
/* save the output */
err = gooseberry_write_dense_matrix(GOOSEBERRY_FORMAT_GRID,outfile,
out->nrows,out->ncols,out->rowval);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_stop_timer(&io_tmr);
dl_print_header("Times",'#');
printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
dl_print_footer('#');
}
END:
if (mat) {
matrix_free(mat);
}
if (vec) {
matrix_free(vec);
}
if (out) {
matrix_free(out);
}
if (rperm) {
dl_free(rperm);
}
if (rpk) {
dl_free(rpk);
}
if (cperm) {
dl_free(cperm);
}
if (cpk) {
dl_free(cpk);
}
if (args) {
dl_free(args);
}
#endif
return err;
}
/**
 * Command handler for the "pagerank" subcommand: read a (square) adjacency
 * matrix, normalize its edge weights by destination in-degree, then run
 * power iteration with damping until the RMSE between successive rank
 * vectors drops below `minerror` or `iter` iterations have elapsed, and
 * write the final rank vector.
 *
 * @param argc Full program argument count (argv[0]=program, argv[1]=command).
 * @param argv Full program argument vector; options start at argv[2].
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __pagerank(
int argc,
char ** argv)
{
dl_timer_t io_tmr, op_tmr, pre_tmr, mul_tmr;
size_t nargs,i,iter,runs,r;
int times, j, err;
ind_t l;
dim_t prows, k, nsinks, m;
real_t minerror, error, damping, diff, dist, deg, wgt;
cmd_arg_t * args = NULL;
real_t * rank = NULL, * indeg = NULL;
dim_t * pk = NULL, * perm = NULL, * order = NULL, * sinks = NULL;
matrix_t * mat = NULL, * out = NULL;
const char * matfile = NULL, * outfile = NULL, * pf = NULL;
/* set defaults */
times = 0;
runs = 1;
minerror = 0.0;
/* iter == 0 means "no iteration cap" (see the inner loop condition) */
iter = 0;
damping = 0.85;
err = cmd_parse_args(argc-2,argv+2,PAGERANK_OPTS,NPAGERANK_OPTS,&args,
&nargs);
if (err != DL_CMDLINE_SUCCESS) {
return GOOSEBERRY_ERROR_INVALIDINPUT;
}
err = GOOSEBERRY_SUCCESS;
if (nargs < 2) {
__command_usage(argv[0],argv[1],PAGERANK_OPTS,NPAGERANK_OPTS,stderr);
goto END;
}
/* process parsed command-line options */
for (i=0;i<nargs;++i) {
switch (args[i].id) {
case PAGERANK_OPTION_HELP:
__command_usage(argv[0],argv[1],PAGERANK_OPTS,NPAGERANK_OPTS,stdout);
goto END;
break;
case PAGERANK_OPTION_INFILE:
if (matfile == NULL) {
matfile = args[i].val.s;
} else {
eprintf("Too many input files specified\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
break;
case PAGERANK_OPTION_OUTFILE:
outfile = args[i].val.s;
break;
case PAGERANK_OPTION_TIME:
times = 1;
break;
case PAGERANK_OPTION_PERM:
pf = args[i].val.s;
break;
case PAGERANK_OPTION_RUNS:
runs = (size_t)args[i].val.i;
break;
case PAGERANK_OPTION_NITER:
iter = (size_t)args[i].val.i;
break;
case PAGERANK_OPTION_DAMPING:
damping = (real_t)args[i].val.f;
break;
case PAGERANK_OPTION_ERROR:
minerror = (real_t)args[i].val.f;
break;
#ifndef NO_OMP
case PAGERANK_OPTION_THREADS:
omp_set_num_threads(args[i].val.i);
break;
#endif
default:
eprintf("Unknown argument '%s'\n",args[i].val.s);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
break;
}
}
if (matfile == NULL) {
eprintf("You must specify a matrix input file.\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (outfile == NULL) {
eprintf("You must specify an output vector file.\n");
err = GOOSEBERRY_ERROR_INVALIDINPUT;
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_init_timer(&io_tmr);
dl_init_timer(&op_tmr);
dl_init_timer(&pre_tmr);
dl_init_timer(&mul_tmr);
dl_start_timer(&io_tmr);
}
/* read in input files */
j = __get_file_type(matfile);
if (j < 0) {
eprintf("Unknown file format of '%s'\n",matfile);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
} else {
/* read in the matrix */
mat = matrix_calloc(1);
if (__is_dense(j)) {
err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
&(mat->ncols),&(mat->rowval));
if (mat->ncols == 1 || mat->nrows == 1) {
mat->type = MATRIX_TYPE_DENSE_VECTOR;
} else {
mat->type = MATRIX_TYPE_DENSE_MATRIX;
}
} else {
err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
&(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
if (mat->ncols == 1 || mat->nrows == 1) {
mat->type = MATRIX_TYPE_SPARSE_VECTOR;
} else {
mat->type = MATRIX_TYPE_SPARSE_MATRIX;
}
}
/* PageRank needs a square matrix; sparse inputs can be padded out to
 * square by adding empty rows/columns, dense inputs cannot */
if (mat->nrows != mat->ncols) {
if (mat->type == MATRIX_TYPE_DENSE_MATRIX) {
eprintf("PageRank requires a square matrix, but input matrix '%s' is "
PF_DIM_T"x"PF_DIM_T".\n",matfile,mat->nrows,mat->ncols);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
} else {
if (mat->nrows > mat->ncols) {
/* extra columns are implicit in CSR: just widen the matrix */
mat->ncols = mat->nrows;
} else if (mat->ncols > mat->nrows) {
/* stretch the matrix */
/* append empty rows by replicating the last rowptr entry */
mat->rowptr = ind_realloc(mat->rowptr,mat->ncols+1);
ind_set(mat->rowptr+mat->nrows+1,mat->rowptr[mat->nrows],
mat->ncols-mat->nrows);
mat->nrows = mat->ncols;
}
}
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
/* read in permutation file if provided */
if (pf) {
prows = mat->nrows;
err = gooseberry_read_labels(pf,&prows,&pk);
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
}
if (times) {
dl_stop_timer(&io_tmr);
dl_start_timer(&pre_tmr);
}
/* permute the input matrix */
if (pk) {
/* derive a symmetric permutation by counting-sorting the identity
 * ordering on the label keys */
perm = dim_alloc(mat->nrows);
order = dim_alloc(mat->nrows);
dim_incset(order,0,1,mat->nrows);
dd_countingsort_kv(pk,order,0,mat->nrows,mat->nrows,perm,
&mat->rdist);
dl_free(order);
dl_free(pk);
pk = NULL;
matrix_permute(mat,perm,perm);
}
/* normalize input matrix and find sinks */
sinks = dim_alloc(mat->nrows);
nsinks = 0;
indeg = real_calloc(mat->ncols);
switch (mat->type) {
case MATRIX_TYPE_SPARSE_MATRIX:
/* first pass: record empty rows as sinks and accumulate per-column
 * in-degree weight */
for (k=0;k<mat->nrows;++k) {
if (mat->rowptr[k] == mat->rowptr[k+1]) {
sinks[nsinks++] = k;
} else {
for (l=mat->rowptr[k];l<mat->rowptr[k+1];++l) {
indeg[mat->rowind[l]] += mat->rowval[l];
}
}
}
/* second pass: normalize each edge by its column's total weight */
for (k=0;k<mat->nrows;++k) {
for (l=mat->rowptr[k];l<mat->rowptr[k+1];++l) {
mat->rowval[l] /= indeg[mat->rowind[l]];
}
}
break;
case MATRIX_TYPE_DENSE_MATRIX:
for (k=0;k<mat->nrows;++k) {
deg = 0;
for (m=0;m<mat->ncols;++m) {
wgt = mat->rowval[(k*mat->ncols)+m];
if (wgt != 0) {
deg += wgt;
indeg[m] += wgt;
}
}
if (deg == 0) {
sinks[nsinks++] = k;
}
}
/* NOTE(review): this divides every entry, including zeros, by indeg[m];
 * a column with no in-edges has indeg[m] == 0, producing 0/0 (NaN).
 * Presumably inputs never have fully-empty columns -- verify. */
for (k=0;k<mat->nrows;++k) {
for (m=0;m<mat->ncols;++m) {
mat->rowval[(k*mat->ncols)+m] /= indeg[m];
}
}
break;
default:
eprintf("Unknown matrix type: %d\n",mat->type);
err = GOOSEBERRY_ERROR_INVALIDINPUT;
goto END;
}
dl_free(indeg);
indeg = NULL;
/* allocate output matrix */
out = matrix_calloc(1);
matrix_init(MATRIX_TYPE_DENSE_VECTOR,mat->ncols,1,0,out);
if (times) {
dl_stop_timer(&pre_tmr);
dl_start_timer(&op_tmr);
}
/* peform pagerank */
rank = real_alloc(mat->ncols);
for (r=0;r<runs;++r) {
/* start from the uniform distribution */
real_set(rank,1.0/out->nrows,out->nrows);
for (i=0;iter==0||i<iter;++i) {
if (times) {
dl_start_timer(&mul_tmr);
}
/* out = M * rank (the SpMV is the dominant cost, timed separately) */
gooseberry_spmult(mat->nrows,mat->ncols,1,mat->rowptr,mat->rowind,
mat->rowval,rank,out->rowval,NULL,0);
if (times) {
dl_stop_timer(&mul_tmr);
}
gooseberry_scale(out->nrows,out->rowval,damping);
/* redistrubite sunk ranks */
dist = 0;
for (k=0;k<nsinks;++k) {
dist += rank[sinks[k]];
}
/* NOTE(review): the sink mass `dist` is added whole to every entry;
 * standard PageRank adds damping*dist/N -- confirm this is intended. */
gooseberry_add_scalar(out->nrows,out->rowval,
((1.0-damping)/out->nrows) + dist);
/* only check RMSE every 10 iterations */
if (i%10 == 0) {
#ifndef NO_OMP
/* NOTE(review): `error` is shared and assigned 0.0 by every thread
 * inside the parallel region while also being a reduction target of
 * the omp for -- this is race-prone; the idiomatic form is to zero
 * `error` once before the region. Verify before touching. */
#pragma omp parallel default(none) shared(out,rank,error) private(diff)
{
error = 0.0;
#pragma omp for schedule(static,OMP_BIG_BLOCK) \
reduction(+:error)
for (k=0;k<out->nrows;++k) {
diff = rank[k] - out->rowval[k];
error += diff*diff;
}
}
#else
error = 0.0;
for (k=0;k<out->nrows;++k) {
diff = rank[k] - out->rowval[k];
error += diff*diff;
}
#endif
if (error <= minerror*minerror) {
++i;
/* skip recalculating RMSE */
goto FINISH;
}
}
/* the old rank vector becomes scratch for the next iteration */
dl_swap(rank,out->rowval);
}
/* check error when finished */
#ifndef NO_OMP
#pragma omp parallel default(none) shared(out,rank,error) private(diff)
{
error = 0.0;
#pragma omp for schedule(static,OMP_BIG_BLOCK) \
reduction(+:error)
for (k=0;k<out->nrows;++k) {
diff = rank[k] - out->rowval[k];
error += diff*diff;
}
}
#else
error = 0.0;
for (k=0;k<out->nrows;++k) {
diff = rank[k] - out->rowval[k];
error += diff*diff;
}
#endif
FINISH:
error = sqrt(error);
}
printf("PageRank finished in %zu iterations with an RMSE of "PF_REAL_T"\n",
i,error);
if (times) {
dl_stop_timer(&op_tmr);
}
/* undo the permutation on the rank vector before writing it out */
if (perm) {
matrix_unpermute(out,perm,NULL);
}
if (times) {
dl_start_timer(&io_tmr);
}
/* save the output */
/* convert the result to match the output file's storage format */
j = __get_file_type(outfile);
if (__is_dense(j)) {
if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
out->type != MATRIX_TYPE_DENSE_MATRIX) {
matrix_densify(out);
}
err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
out->rowval);
} else {
if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
out->type != MATRIX_TYPE_SPARSE_MATRIX) {
matrix_sparsify(out);
}
err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
out->rowptr,out->rowind,out->rowval);
}
if (err != GOOSEBERRY_SUCCESS) {
goto END;
}
if (times) {
dl_stop_timer(&io_tmr);
dl_print_header("Times",'#');
printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
printf(" Preprocessing: %0.04lf\n",dl_poll_timer(&pre_tmr));
printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
printf(" SpMV: %0.04lf\n",dl_poll_timer(&mul_tmr));
dl_print_footer('#');
}
/* unified cleanup path for both success and error exits */
END:
if (indeg) {
dl_free(indeg);
}
if (sinks) {
dl_free(sinks);
}
if (rank) {
dl_free(rank);
}
if (mat) {
matrix_free(mat);
}
if (perm) {
dl_free(perm);
}
if (pk) {
dl_free(pk);
}
if (out) {
matrix_free(out);
}
if (args) {
dl_free(args);
}
return err;
}
/* Signature shared by all command handlers: (argc, argv) -> error code. */
typedef int (*__cmdfuncptr_t)(int,char**);
/* Dispatch table mapping each command id to its handler; indexed in
 * parallel with the COMMANDS table (see the static assert below). */
static const __cmdfuncptr_t COMMAND_FUNCS[] = {
[COMMAND_HELP] = __help,
[COMMAND_ANALYSIS] = __analyze,
[COMMAND_PERMUTE] = __permute,
[COMMAND_TRANSFORM] = __transform,
[COMMAND_GENERATE] = __generate,
[COMMAND_BLAS] = __blas,
[COMMAND_CGD] = __cgd,
[COMMAND_SGD] = __sgd,
[COMMAND_PAGERANK] = __pagerank
};
/* don't get burned */
/* keep the name table and the function table the same length */
DL_STATIC_ASSERT(ARRAY_SIZE(COMMANDS) == ARRAY_SIZE(COMMAND_FUNCS));
/******************************************************************************
* MAIN ************************************************************************
******************************************************************************/
/**
 * Program entry point: look up the command named in argv[1] in the COMMANDS
 * table and invoke the matching handler from COMMAND_FUNCS.
 *
 * @param argc Argument count.
 * @param argv Argument vector; argv[1] is the command name.
 *
 * @return 0 on success, 1 on a usage error, 2 if the command handler failed.
 */
int main(
int argc,
char ** argv)
{
size_t idx;
int rc;
dl_init_rand();
if (argc < 2) {
eprintf("Must supply a command.\n");
__usage(argv[0],stderr);
return 1;
}
char * const cmd = argv[1];
/* locate the command in the table */
idx = 0;
while (idx < NCOMMANDS) {
const char * name = COMMANDS[idx].str;
if (name != NULL && strcmp(cmd,name) == 0) {
break;
}
++idx;
}
if (idx == NCOMMANDS) {
eprintf("Unrecognized command '%s'.\n",cmd);
__usage(argv[0],stderr);
return 1;
}
/* run the handler and translate its error code to a process exit code */
rc = COMMAND_FUNCS[idx](argc,argv);
if (rc == GOOSEBERRY_SUCCESS) {
return 0;
}
eprintf("Operation failed.\n");
return 2;
}
#endif
|
15_blur_parallel.c | #include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <omp.h>
#define NX 1002
#define NY 1002
/*
 * Apply `iters` rounds of a 4-neighbor box blur to a szx-by-szy image
 * stored row-major in `image` (index iy + ix*szy). Only interior pixels
 * are updated; the one-pixel border is left unchanged. Each new pixel is
 * the rounded average of its four edge neighbors (the center is excluded).
 *
 * Fixes: the scratch-buffer initialization previously iterated over
 * NX*NY (the file-level macros) instead of szx*szy, which broke the
 * function for any other image size and read/wrote out of bounds when
 * szx*szy > NX*NY. The malloc result is now also checked.
 */
void blur(int *image, size_t szx, size_t szy, size_t iters){
int *temp = malloc(sizeof(int) * szx * szy);
if (temp == NULL){
fprintf(stderr, "blur: out of memory\n");
return;
}
/* seed the scratch buffer so untouched border pixels copy back unchanged */
for (size_t i = 0; i < szx * szy; ++i) temp[i] = image[i];
for (size_t iit = 0; iit < iters; ++iit){
#pragma omp parallel for
for (size_t ix = 1; ix< szx-1; ++ix){
for (size_t iy = 1; iy< szy-1; ++iy){
/* rounded mean of the four edge neighbors (+0.5 rounds to nearest) */
temp[iy + ix * szy] = (int)(0.25 * (float)(image[iy + (ix+1) * szy] +
image[iy + (ix-1) * szy] + image[(iy-1) + ix * szy] +
image[(iy+1) + ix * szy]) + 0.5);
}
}
/* publish this iteration's result before the next one reads it */
for (size_t i = 0; i < (szx * szy); ++i){
image[i] = temp[i];
}
}
free(temp);
}
/*
 * Driver: blur a NX-by-NY image of constant value 10000 times and report
 * the wall-clock time.
 *
 * Fix: the image was previously a ~4 MB automatic array (int image[NX*NY]),
 * which risks overflowing the default stack; it is now heap-allocated and
 * checked.
 */
int main(){
int *image = malloc(sizeof(int) * NX * NY);
struct timespec t1, t2;
float dtime;
if (image == NULL){
fprintf(stderr, "out of memory\n");
return 1;
}
for (size_t i = 0; i< NX*NY; ++i) image[i]=5;
printf("OpenMP code running on %i threads\n",omp_get_max_threads());
clock_gettime(CLOCK_REALTIME, &t1);
blur(image,NX,NY, 10000);
clock_gettime(CLOCK_REALTIME, &t2);
/* wall-clock elapsed seconds (float is adequate for a human-readable report) */
dtime = (float)(t2.tv_sec - t1.tv_sec) + ((float)(t2.tv_nsec - t1.tv_nsec)
/1.0e9);
printf("Time taken was %f seconds\n",dtime);
printf("Arbitrary value from image %i\n",image[100]);
printf("Arbitrary value printed to avoid compiler optimising the blur out\n");
free(image);
return 0;
}
|
csr_matvec_oomp.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp"
#if defined(HYPRE_USING_DEVICE_OPENMP)
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
/*
 * y[offset:end] = alpha*op(A)[offset:end,:]*x + beta*y[offset:end]
 * computed on the device via OpenMP target offload, where op(A) is A or
 * A^T depending on `trans`.
 *
 * Fix: when trans != 0, hypre_CSRMatrixTransposeDevice allocates a new
 * matrix B that was never released, leaking the full transpose on every
 * call; it is now destroyed before returning (matching upstream HYPRE).
 */
HYPRE_Int
hypre_CSRMatrixMatvecOMPOffload( HYPRE_Int trans,
HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y,
HYPRE_Int offset )
{
hypre_CSRMatrix *B;
if (trans)
{
/* B is a freshly-allocated transpose and owned by this function */
hypre_CSRMatrixTransposeDevice(A, &B, 1);
/* HYPRE_CUDA_CALL(cudaDeviceSynchronize()); */
}
else
{
B = A;
}
HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(B);
HYPRE_Complex *A_data = hypre_CSRMatrixData(B);
HYPRE_Int *A_i = hypre_CSRMatrixI(B);
HYPRE_Int *A_j = hypre_CSRMatrixJ(B);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int i;
/* one team-distributed row per iteration; pointers already live on device */
#pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
for (i = offset; i < A_nrows; i++)
{
HYPRE_Complex tempx = 0.0;
HYPRE_Int j;
for (j = A_i[i]; j < A_i[i + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[i] = alpha * tempx + beta * y_data[i];
}
/* HYPRE_CUDA_CALL(cudaDeviceSynchronize()); */
/* release the temporary transpose (leak fix) */
if (trans)
{
hypre_CSRMatrixDestroy(B);
}
return hypre_error_flag;
}
#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
|
heat-mpi-ompss.c | /*****************************************************************************\
* ANALYSIS PERFORMANCE TOOLS *
* Extrae *
* Instrumentation package for parallel applications *
*****************************************************************************
* ___ This library is free software; you can redistribute it and/or *
* / __ modify it under the terms of the GNU LGPL as published *
* / / _____ by the Free Software Foundation; either version 2.1 *
* / / / \ of the License, or (at your option) any later version. *
* ( ( ( B S C ) *
* \ \ \_____/ This library is distributed in hope that it will be *
* \ \__ useful but WITHOUT ANY WARRANTY; without even the *
* \___ implied warranty of MERCHANTABILITY or FITNESS FOR A *
* PARTICULAR PURPOSE. See the GNU LGPL for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this library; if not, write to the Free Software Foundation, *
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
* The GNU LEsser General Public License is contained in the file COPYING. *
* --------- *
* Barcelona Supercomputing Center - Centro Nacional de Supercomputacion *
\*****************************************************************************/
/*
* Iterative solver for heat distribution
*/
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "heat.h"
/* Print a short command-line usage message for this solver to stderr. */
void usage( char *s )
{
    const char *fmt = "Usage: %s <input file> [result file]\n\n";
    fprintf(stderr, fmt, s);
}
/*
 * Program entry point.
 *
 * Rank 0 (master): parses arguments and the input file, allocates the full
 * grid, ships each worker its block of rows (plus one halo row on each side),
 * iterates on its own first block, gathers the computed rows back, and writes
 * the result image.  Ranks > 0 (workers): receive the solver parameters and
 * their block, iterate, and return the block to rank 0.
 *
 * The Jacobi path overlaps computation, halo exchange and the residual
 * MPI_Allreduce using OmpSs tasks; ordering is expressed with in/out/inout
 * dependence clauses and synchronised with a taskwait every 10 iterations,
 * when convergence is tested.
 *
 * Fixes vs. previous revision: restored the '&' address-of operators that had
 * been mangled into '¶' mojibake, corrected the calloc(count, size)
 * argument order, and dropped the redundant NULL guards around free().
 *
 * Returns 0 on success, 1 on bad arguments, unreadable input or I/O failure.
 */
int main( int argc, char *argv[] )
{
    unsigned iter;
    FILE *infile, *resfile;
    char *resfilename;
    int myid, numprocs;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    if (myid == 0)
    {
        // algorithmic parameters
        algoparam_t param;
        int np;
        double runtime, flop;
        double residual = 0.0;

        // check arguments
        if( argc < 2 )
        {
            usage( argv[0] );
            return 1;
        }

        // check input file
        if( !(infile=fopen(argv[1], "r")) )
        {
            fprintf(stderr, "\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
            usage(argv[0]);
            return 1;
        }

        // check result file
        resfilename = (argc>=3) ? argv[2] : "heat.ppm";
        if( !(resfile=fopen(resfilename, "w")) )
        {
            fprintf(stderr, "\nError: Cannot open \"%s\" for writing.\n\n", resfilename);
            usage(argv[0]);
            return 1;
        }

        // check input
        if( !read_input(infile, &param) )
        {
            fprintf(stderr, "\nError: Error parsing input file.\n\n");
            usage(argv[0]);
            return 1;
        }
        print_params(&param);

        // set the visualization resolution
        param.u     = 0;
        param.uhelp = 0;
        param.uvis  = 0;
        param.visres = param.resolution;

        if (!initialize(&param))
        {
            fprintf(stderr, "Error in Solver initialization.\n\n");
            usage(argv[0]);
            return 1;
        }

        // full size (param.resolution are only the inner points)
        np = param.resolution + 2;

        // starting time
        runtime = wtime();

        // send to workers the necessary data to perform computation
        int first_row = 1;
        int last_row;
        for (int i=0; i<numprocs; i++)
        {
            // each rank gets resolution/numprocs rows; the first
            // resolution%numprocs ranks take one extra row
            last_row = first_row + param.resolution/numprocs - 1;
            last_row = (i < (param.resolution%numprocs)) ? last_row+1 : last_row;
            if (i>0)
            {
                MPI_Send(&param.maxiter, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&param.resolution, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&param.algorithm, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&first_row, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                MPI_Send(&last_row, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                // block plus one halo row above and below: rows+2 rows total
                MPI_Send(&param.u[(first_row-1)*np], (last_row-first_row+3)*np,
                         MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
                MPI_Send(&param.uhelp[(first_row-1)*np], (last_row-first_row+3)*np,
                         MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
            }
            first_row = last_row + 1;
        }

        // rank 0 keeps the first block for itself
        first_row = 1;
        last_row = first_row + param.resolution/numprocs - 1;
        last_row = ((param.resolution%numprocs)>0) ? last_row+1 : last_row;
        int rows = last_row-first_row+1;

        iter = 0;
        while(1)
        {
            switch( param.algorithm )
            {
            case 0: // JACOBI
                {
                    double *uu, *uhelp;
                    uu = param.u;
                    uhelp = param.uhelp;
                    #pragma omp task in (*uu) out (*uhelp) out (residual) label (compute)
                    residual = relax_jacobi(uu, uhelp, rows+2, np);
                    // NOTE(review): this print races with the compute task
                    // (no taskwait before the read) -- value may be stale.
                    printf ("Residual in main %lf\n", residual);
                    // exchange the bottom halo with rank 1.
                    // NOTE(review): assumes numprocs > 1 -- rank 0 always
                    // addresses partner 1 here; confirm single-rank runs.
                    #pragma omp task inout (*uhelp) label (comm) inout (residual)
                    {
                        MPI_Sendrecv(&uhelp[last_row*np], np, MPI_DOUBLE, 1, 0,
                                     &uhelp[(last_row+1)*np], np, MPI_DOUBLE, 1, 0,
                                     MPI_COMM_WORLD, &status);
                    }
                    // Copy uhelp into u
                    #pragma omp task in (*uhelp) out (*uu) label (copy)
                    for (int i=first_row-1; i<last_row+2; i++)
                        for (int j=0; j<np; j++)
                            uu[ i*np+j ] = uhelp[ i*np+j ];
                }
                break;
            case 1: // RED-BLACK
                residual = relax_redblack(param.u, np, np);
                break;
            case 2: // GAUSS
                residual = relax_gauss(param.u, np, np);
                break;
            }

            iter++;

            // combine the per-rank residuals
            #pragma omp task inout (residual) label (allreduce)
            MPI_Allreduce(MPI_IN_PLACE, &residual, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

            // solution good enough ?  Checked only every 10 iterations so the
            // task pipeline is not drained at every step.
            if (iter %10 ==0)
            {
                #pragma omp taskwait
                if (residual < 0.00005) break;
                // max. iteration reached ? (no limit with maxiter=0)
                if (param.maxiter>0 && iter>=param.maxiter)
                    break;
            }
        }
        #pragma omp taskwait

        // receive from workers the portions of matrix u computed
        first_row = 1;
        for (int i=0; i<numprocs; i++)
        {
            last_row = first_row + param.resolution/numprocs - 1;
            last_row = (i < (param.resolution%numprocs)) ? last_row+1 : last_row;
            if (i>0)
                MPI_Recv(&param.u[first_row*np], (last_row-first_row+1)*(np),
                         MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &status);
            first_row = last_row + 1;
        }

        // Flop count after iter iterations
        flop = iter * 11.0 * param.resolution * param.resolution;

        // stopping time
        runtime = wtime() - runtime;

        fprintf(stdout, "Time: %04.3f ", runtime);
        fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
                flop/1000000000.0,
                flop/runtime/1000000);
        fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

        // for plot...
        coarsen( param.u, np, np,
                 param.uvis, param.visres+2, param.visres+2 );
        write_image( resfile, param.uvis,
                     param.visres+2,
                     param.visres+2 );

        finalize( &param );
        MPI_Finalize();
    }
    else
    {
        algoparam_t param;
        // receive information from master to perform computation locally
        int rows, np;
        int first_row, last_row;
        int iter;
        double residual;

        MPI_Recv(&param.maxiter, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(&param.resolution, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(&param.algorithm, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(&first_row, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(&last_row, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        rows = last_row-first_row+1;
        np = param.resolution + 2;

        // allocate memory for worker: the block plus two halo rows
        // (calloc takes (count, size) -- fixed from the swapped order)
        param.u = calloc( (rows+2)*np, sizeof(double) );
        param.uhelp = calloc( (rows+2)*np, sizeof(double) );
        if( (!param.u) || (!param.uhelp) )
        {
            fprintf(stderr, "Error: Cannot allocate memory\n");
            return 0;
        }

        // fill initial values for matrix with values received from master
        MPI_Recv(param.u, (rows+2)*np, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Recv(param.uhelp, (rows+2)*np, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &status);

        iter = 0;
        while(1)
        {
            switch( param.algorithm )
            {
            case 0: // JACOBI
                {
                    double *uu, *uhelp;
                    uu = param.u;
                    uhelp = param.uhelp;
                    #pragma omp task in (*uu) out (*uhelp) out (residual) label (compute)
                    residual = relax_jacobi(uu, uhelp, rows+2, np);
                    // halo exchange: every worker has a neighbour below
                    // (rank myid-1); only non-last ranks have one above.
                    #pragma omp task inout (*uhelp) label (comm) inout (residual)
                    {
                        if (myid < (numprocs-1))
                        {
                            MPI_Sendrecv(&uhelp[rows*np], np, MPI_DOUBLE, myid+1, 0,
                                         &uhelp[(rows+1)*np], np, MPI_DOUBLE, myid+1, 0,
                                         MPI_COMM_WORLD, &status);
                        }
                        MPI_Sendrecv(&uhelp[np], np, MPI_DOUBLE, myid-1, 0,
                                     &uhelp[0], np, MPI_DOUBLE, myid-1, 0,
                                     MPI_COMM_WORLD, &status);
                    }
                    // Copy uhelp into u
                    #pragma omp task in (*uhelp) out(*uu) label (copy)
                    for (int i=0; i<rows+2; i++)
                        for (int j=0; j<np; j++)
                            uu[ i*np+j ] = uhelp[ i*np+j ];
                }
                break;
            case 1: // RED-BLACK
                residual = relax_redblack(param.u, np, np);
                break;
            case 2: // GAUSS
                residual = relax_gauss(param.u, np, np);
                break;
            }

            iter++;

            #pragma omp task inout (residual) label (allreduce)
            MPI_Allreduce(MPI_IN_PLACE, &residual, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

            // solution good enough ?
            if (iter %10 == 0)
            {
                #pragma omp taskwait
                if (residual < 0.00005) break;
                // max. iteration reached ? (no limit with maxiter=0)
                if (param.maxiter>0 && iter>=param.maxiter) break;
            }
        }
        #pragma omp taskwait

        // send values computed for matrix to the master (inner rows only)
        MPI_Send(&param.u[np], rows*np, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);

        free(param.u);      // free(NULL) is a no-op; no guard needed
        free(param.uhelp);
        MPI_Finalize();
    }
    return 0;
}
|
axhelm.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*
 * axhelm_v0: reference (CPU) Helmholtz operator for one scalar field,
 * Aq = A*q + lambda*M*q, evaluated element by element on p_Nq^3
 * tensor-product (spectral-element) nodes.
 *
 *  Nelements  number of elements to process
 *  offset     field stride -- unused in this scalar variant (kept so the
 *             signature matches the 3-field version axhelm_n3_v0)
 *  ggeo       geometric factors, p_Nggeo per node (G00..G22 and GwJ)
 *  D          1-D differentiation matrix, p_Nq x p_Nq, row-major
 *  lambda     Helmholtz parameter multiplying the mass term
 *  q          input field, p_Np values per element
 *  Aq         output field, p_Np values per element
 *
 * p_Nq, p_Np, p_Nggeo and the p_G*ID offsets are compile-time macros
 * supplied by the build -- presumably p_Np == p_Nq^3; confirm in the
 * generator.  Elements are independent, hence the parallel-for over e
 * with the scratch arrays privatized per thread.
 */
extern "C" void axhelm_v0(const dlong & Nelements,
                          const dlong & offset,
                          const dfloat * __restrict__ ggeo ,
                          const dfloat * __restrict__ D ,
                          const dfloat & lambda,
                          const dfloat * __restrict__ q ,
                          dfloat * __restrict__ Aq ){
    // per-element scratch: the local field and its three "geometrically
    // rotated" derivative contributions
    dfloat s_q  [p_Nq][p_Nq][p_Nq];
    dfloat s_Gqr[p_Nq][p_Nq][p_Nq];
    dfloat s_Gqs[p_Nq][p_Nq][p_Nq];
    dfloat s_Gqt[p_Nq][p_Nq][p_Nq];
    dfloat s_D[p_Nq][p_Nq];
    // cache the differentiation matrix once (shared, read-only)
    for(int j=0;j<p_Nq;++j){
        for(int i=0;i<p_Nq;++i){
            s_D[j][i] = D[j*p_Nq+i];
        }
    }
    #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
    for(dlong e=0; e<Nelements; ++e){
        const dlong element = e;
        // load the element's nodal values into the scratch cube
        for(int k = 0; k < p_Nq; k++) {
            for(int j=0;j<p_Nq;++j){
                for(int i=0;i<p_Nq;++i){
                    const dlong base = i + j*p_Nq + k*p_Nq*p_Nq + element*p_Np;
                    const dfloat qbase = q[base];
                    s_q[k][j][i] = qbase;
                }
            }
        }
        // phase 1: qr/qs/qt = D q along each tensor direction, then apply
        // the symmetric geometric-factor matrix G to get Gqr/Gqs/Gqt
        for(int k=0;k<p_Nq;++k){
            for(int j=0;j<p_Nq;++j){
                for(int i=0;i<p_Nq;++i){
                    const dlong gbase = element*p_Nggeo*p_Np + k*p_Nq*p_Nq + j*p_Nq + i;
                    const dfloat r_G00 = ggeo[gbase+p_G00ID*p_Np];
                    const dfloat r_G01 = ggeo[gbase+p_G01ID*p_Np];
                    const dfloat r_G11 = ggeo[gbase+p_G11ID*p_Np];
                    const dfloat r_G12 = ggeo[gbase+p_G12ID*p_Np];
                    const dfloat r_G02 = ggeo[gbase+p_G02ID*p_Np];
                    const dfloat r_G22 = ggeo[gbase+p_G22ID*p_Np];
                    dfloat qr = 0.f;
                    dfloat qs = 0.f;
                    dfloat qt = 0.f;
                    for(int m = 0; m < p_Nq; m++) {
                        qr += s_D[i][m]*s_q[k][j][m];
                        qs += s_D[j][m]*s_q[k][m][i];
                        qt += s_D[k][m]*s_q[m][j][i];
                    }
                    dfloat Gqr = r_G00*qr + r_G01*qs + r_G02*qt;
                    dfloat Gqs = r_G01*qr + r_G11*qs + r_G12*qt;
                    dfloat Gqt = r_G02*qr + r_G12*qs + r_G22*qt;
                    s_Gqr[k][j][i] = Gqr;
                    s_Gqs[k][j][i] = Gqs;
                    s_Gqt[k][j][i] = Gqt;
                }
            }
        }
        // phase 2: apply D^T to the G-weighted gradients and add the
        // lambda-scaled mass term GwJ*lambda*q
        for(int k = 0;k <p_Nq; k++){
            for(int j=0;j<p_Nq;++j){
                for(int i=0;i<p_Nq;++i){
                    const dlong gbase = element*p_Nggeo*p_Np + k*p_Nq*p_Nq + j*p_Nq + i;
                    const dfloat r_GwJ = ggeo[gbase+p_GWJID*p_Np];
                    const dfloat r_Aq = r_GwJ*lambda*s_q[k][j][i];
                    dfloat r_Aqr = 0, r_Aqs = 0, r_Aqt = 0;
                    for(int m = 0; m < p_Nq; m++) {
                        // note the transposed access s_D[m][.] vs phase 1
                        r_Aqr += s_D[m][i]*s_Gqr[k][j][m];
                        r_Aqs += s_D[m][j]*s_Gqs[k][m][i];
                        r_Aqt += s_D[m][k]*s_Gqt[m][j][i];
                    }
                    const dlong id = element*p_Np +k*p_Nq*p_Nq+ j*p_Nq + i;
                    Aq[id] = r_Aqr + r_Aqs + r_Aqt +r_Aq;
                }
            }
        }
    }
}
/*
 * axhelm_n3_v0: three-field variant of axhelm_v0 (e.g. the three velocity
 * components), applying Aq = A*q + lambda*M*q to each component.
 *
 * Identical structure to the scalar kernel, but the fields are laid out at
 * strides of `offset`: component c of node `id` lives at q[id + c*offset],
 * and results are written to Aq[id + c*offset].  lambda is the same for
 * all three components here (r_lam01 == r_lam11 == r_lam21 == lambda).
 * p_Nq/p_Np/p_Nggeo/p_G*ID are compile-time macros from the build.
 */
extern "C" void axhelm_n3_v0(const dlong & Nelements,
                             const dlong & offset,
                             const dfloat * __restrict__ ggeo ,
                             const dfloat * __restrict__ D ,
                             const dfloat & lambda,
                             const dfloat * __restrict__ q ,
                             dfloat * __restrict__ Aq ){
    // scratch for all three components (leading index = component)
    dfloat s_q  [3][p_Nq][p_Nq][p_Nq];
    dfloat s_Gqr[3][p_Nq][p_Nq][p_Nq];
    dfloat s_Gqs[3][p_Nq][p_Nq][p_Nq];
    dfloat s_Gqt[3][p_Nq][p_Nq][p_Nq];
    dfloat s_D[p_Nq][p_Nq];
    // cache the 1-D differentiation matrix (shared, read-only)
    for(int j=0;j<p_Nq;++j){
        for(int i=0;i<p_Nq;++i){
            s_D[j][i] = D[j*p_Nq+i];
        }
    }
    #pragma omp parallel for private(s_q, s_Gqr, s_Gqs, s_Gqt)
    for(dlong e=0; e<Nelements; ++e){
        const dlong element = e;
        // gather the three components of this element
        for(int k = 0; k < p_Nq; k++) {
            for(int j=0;j<p_Nq;++j){
                for(int i=0;i<p_Nq;++i){
                    const dlong base = i + j*p_Nq + k*p_Nq*p_Nq + element*p_Np;
                    s_q[0][k][j][i] = q[base + 0*offset];
                    s_q[1][k][j][i] = q[base + 1*offset];
                    s_q[2][k][j][i] = q[base + 2*offset];
                }
            }
        }
        // phase 1: directional derivatives, then geometric-factor rotation,
        // unrolled over the three components
        for(int k=0;k<p_Nq;++k){
            for(int j=0;j<p_Nq;++j){
                for(int i=0;i<p_Nq;++i){
                    const dlong gbase = element*p_Nggeo*p_Np + k*p_Nq*p_Nq + j*p_Nq + i;
                    const dfloat r_G00 = ggeo[gbase+p_G00ID*p_Np];
                    const dfloat r_G01 = ggeo[gbase+p_G01ID*p_Np];
                    const dfloat r_G11 = ggeo[gbase+p_G11ID*p_Np];
                    const dfloat r_G12 = ggeo[gbase+p_G12ID*p_Np];
                    const dfloat r_G02 = ggeo[gbase+p_G02ID*p_Np];
                    const dfloat r_G22 = ggeo[gbase+p_G22ID*p_Np];
                    const dlong id = element*p_Np + k*p_Nq*p_Nq + j*p_Nq + i;
                    dfloat qr0 = 0.f, qr1 = 0.f, qr2 = 0.f;
                    dfloat qs0 = 0.f, qs1 = 0.f, qs2 = 0.f;
                    dfloat qt0 = 0.f, qt1 = 0.f, qt2 = 0.f;
                    for(int m = 0; m < p_Nq; m++) {
                        qr0 += s_D[i][m]*s_q[0][k][j][m];
                        qs0 += s_D[j][m]*s_q[0][k][m][i];
                        qt0 += s_D[k][m]*s_q[0][m][j][i];
                        //
                        qr1 += s_D[i][m]*s_q[1][k][j][m];
                        qs1 += s_D[j][m]*s_q[1][k][m][i];
                        qt1 += s_D[k][m]*s_q[1][m][j][i];
                        //
                        qr2 += s_D[i][m]*s_q[2][k][j][m];
                        qs2 += s_D[j][m]*s_q[2][k][m][i];
                        qt2 += s_D[k][m]*s_q[2][m][j][i];
                    }
                    //
                    s_Gqr[0][k][j][i] = r_G00*qr0 + r_G01*qs0 + r_G02*qt0;
                    s_Gqs[0][k][j][i] = r_G01*qr0 + r_G11*qs0 + r_G12*qt0;
                    s_Gqt[0][k][j][i] = r_G02*qr0 + r_G12*qs0 + r_G22*qt0;
                    s_Gqr[1][k][j][i] = r_G00*qr1 + r_G01*qs1 + r_G02*qt1;
                    s_Gqs[1][k][j][i] = r_G01*qr1 + r_G11*qs1 + r_G12*qt1;
                    s_Gqt[1][k][j][i] = r_G02*qr1 + r_G12*qs1 + r_G22*qt1;
                    s_Gqr[2][k][j][i] = r_G00*qr2 + r_G01*qs2 + r_G02*qt2;
                    s_Gqs[2][k][j][i] = r_G01*qr2 + r_G11*qs2 + r_G12*qt2;
                    s_Gqt[2][k][j][i] = r_G02*qr2 + r_G12*qs2 + r_G22*qt2;
                }
            }
        }
        // phase 2: D^T contraction plus the lambda-scaled mass term,
        // per component
        for(int k = 0;k < p_Nq; k++){
            for(int j=0;j<p_Nq;++j){
                for(int i=0;i<p_Nq;++i){
                    const dlong gbase = element*p_Nggeo*p_Np + k*p_Nq*p_Nq + j*p_Nq + i;
                    const dfloat r_GwJ = ggeo[gbase+p_GWJID*p_Np];
                    const dlong id = element*p_Np +k*p_Nq*p_Nq+ j*p_Nq + i;
                    // one lambda per component; all equal in this kernel
                    const dfloat r_lam01 = lambda;
                    const dfloat r_lam11 = lambda;
                    const dfloat r_lam21 = lambda;
                    dfloat r_Aq0 = r_GwJ*r_lam01*s_q[0][k][j][i];
                    dfloat r_Aq1 = r_GwJ*r_lam11*s_q[1][k][j][i];
                    dfloat r_Aq2 = r_GwJ*r_lam21*s_q[2][k][j][i];
                    dfloat r_Aqr0 = 0, r_Aqs0 = 0, r_Aqt0 = 0;
                    dfloat r_Aqr1 = 0, r_Aqs1 = 0, r_Aqt1 = 0;
                    dfloat r_Aqr2 = 0, r_Aqs2 = 0, r_Aqt2 = 0;
                    for(int m = 0; m < p_Nq; m++){
                        r_Aqr0 += s_D[m][i]*s_Gqr[0][k][j][m];
                        r_Aqr1 += s_D[m][i]*s_Gqr[1][k][j][m];
                        r_Aqr2 += s_D[m][i]*s_Gqr[2][k][j][m];
                        r_Aqs0 += s_D[m][j]*s_Gqs[0][k][m][i];
                        r_Aqs1 += s_D[m][j]*s_Gqs[1][k][m][i];
                        r_Aqs2 += s_D[m][j]*s_Gqs[2][k][m][i];
                        r_Aqt0 += s_D[m][k]*s_Gqt[0][m][j][i];
                        r_Aqt1 += s_D[m][k]*s_Gqt[1][m][j][i];
                        r_Aqt2 += s_D[m][k]*s_Gqt[2][m][j][i];
                    }
                    Aq[id + 0*offset] = r_Aqr0 + r_Aqs0 + r_Aqt0 +r_Aq0;
                    Aq[id + 1*offset] = r_Aqr1 + r_Aqs1 + r_Aqt1 +r_Aq1;
                    Aq[id + 2*offset] = r_Aqr2 + r_Aqs2 + r_Aqt2 +r_Aq2;
                }
            }
        }
    }
}
|
prepress.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS %
% P P R R E P P R R E SS SS %
% PPPP RRRR EEE PPPP RRRR EEE SSS SSS %
% P R R E P R R E SS SS %
% P R R EEEEE P R R EEEEE SSSSS SSSSS %
% %
% %
% MagickCore Prepress Methods %
% %
% Software Design %
% John Cristy %
% October 2001 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/prepress.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T o t a l I n k D e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageTotalInkDensity() returns the total ink density for a CMYK image.
% Total Ink Density (TID) is determined by adding the CMYK values in the
% darkest shadow area in an image.
%
% The format of the GetImageTotalInkDensity method is:
%
% double GetImageTotalInkDensity(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
 * GetImageTotalInkDensity: return the Total Ink Density of a CMYK image --
 * despite the "total" in the name, this is the MAXIMUM over all pixels of
 * the per-pixel channel sum (the darkest shadow), per the TID definition
 * in the header comment above.  Returns 0.0 for non-CMYK images (after
 * raising ColorSeparatedImageRequired) or if any row fails to load.
 */
MagickExport double GetImageTotalInkDensity(Image *image)
{
    CacheView
        *image_view;
    double
        total_ink_density;
    ExceptionInfo
        *exception;
    MagickBooleanType
        status;
    ssize_t
        y;

    assert(image != (Image *) NULL);
    if (image->debug != MagickFalse)
        (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
    assert(image->signature == MagickSignature);
    // only meaningful for color-separated (CMYK) images
    if (image->colorspace != CMYKColorspace)
    {
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
            ImageError,"ColorSeparatedImageRequired","`%s'",image->filename);
        return(0.0);
    }
    status=MagickTrue;
    total_ink_density=0.0;
    exception=(&image->exception);
    image_view=AcquireCacheView(image);
    #if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic,4) shared(status)
    #endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
        double
            density;
        register const IndexPacket
            *indexes;
        register const PixelPacket
            *p;
        register ssize_t
            x;

        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
        {
            // row fetch failed: mark failure and skip this row
            status=MagickFalse;
            continue;
        }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
            // sum of the four channels; presumably red/green/blue hold
            // C/M/Y and the index holds K for CMYK images -- per
            // MagickCore channel conventions (confirm against pixel.h)
            density=(double) GetRedPixelComponent(p)+GetGreenPixelComponent(p)+
                GetBluePixelComponent(p)+GetIndexPixelComponent(indexes+x);
            // double-checked max update: the unguarded read avoids taking
            // the critical section for most pixels; the re-check inside
            // makes the update itself race-free
            if (density > total_ink_density)
            #if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_GetImageTotalInkDensity)
            #endif
            {
                if (density > total_ink_density)
                    total_ink_density=density;
            }
            p++;
        }
    }
    image_view=DestroyCacheView(image_view);
    if (status == MagickFalse)
        total_ink_density=0.0;
    return(total_ink_density);
}
|
hcb_basis_core.h | #ifndef _HCB_BASIS_CORE_H
#define _HCB_BASIS_CORE_H
#include <complex>
#include <vector>
#include <iostream>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "benes_perm.h"
#include "openmp.h"
namespace basis_general {
template<class I>
I inline hcb_map_bits(I s,const int map[],const int N){
	// Permute the N low bits of s according to map[].  A non-negative
	// entry m sends the bit of site i to position N-m-1; a negative
	// entry m sends it, inverted, to position N+m.
	I out = 0;
	for(int site = N-1; site >= 0; --site){
		const int target = map[site];
		const I bit = s & 1;
		if(target < 0){
			out ^= (bit ^ (I)1) << (N + target);
		}
		else{
			out ^= bit << (N - target - 1);
		}
		s >>= 1;
	}
	return out;
}
// Hard-core boson basis core: implements the symmetry-transformation and
// on-site operator machinery for bit-string states, using precompiled
// Benes permutation networks (one per symmetry map) so that applying a
// map is O(log bits) instead of O(N).
template<class I,class P=signed char>
class hcb_basis_core : public general_basis_core<I,P>
{
	public:
		// benes_maps[i]: Benes network realizing the bit permutation of map i;
		// invs[i]: mask of bits that map i additionally inverts (negative
		// entries of the raw map).
		std::vector<tr_benes<I>> benes_maps;
		std::vector<I> invs;

		// Constructor without symmetries: nothing to precompile.
		// NOTE(review): delegates as general_basis_core<I> relying on the
		// default P template argument of the base -- confirm it is the
		// same default as here.
		hcb_basis_core(const int _N, const bool _fermionic=false) : \
		general_basis_core<I>::general_basis_core(_N,_fermionic) {}

		// Constructor with _nt symmetry maps (each a length-_N array in
		// _maps): translate every map into a Benes network plus an
		// inversion mask.
		hcb_basis_core(const int _N,const int _nt,const int _maps[], \
				   const int _pers[], const int _qs[], const bool _fermionic=false) : \
		general_basis_core<I,P>::general_basis_core(_N,_nt,_maps,_pers,_qs,_fermionic) {
			benes_maps.resize(_nt);
			invs.resize(_nt);
			ta_index<I> index;
			// start from the "no move" permutation template
			for(int j=0;j<bit_info<I>::bits;j++){index.data[j] = no_index;}

			for(int i=0;i<_nt;i++){
				const int * map = &general_basis_core<I,P>::maps[i*_N];
				I inv = 0;
				for(int j=0;j<_N;j++){
					int m = map[j];
					int bit_j = _N - j - 1; // bit position of site j
					if(m<0){ // negative entry: permute AND invert this bit
						int bit_m = _N + m;
						index.data[bit_j] = bit_m;
						inv ^= ((I)1 << bit_j);
					}
					else{
						int bit_m = _N - m -1;
						index.data[bit_j] = bit_m;
					}
				}
				gen_benes<I>(&benes_maps[i],index);
				invs[i] = inv;
			}
		}

		~hcb_basis_core() {}

		// Apply symmetry map n_map to a single state.  `sign` is untouched:
		// hard-core boson maps carry no fermionic sign here.
		I map_state(I s,int n_map,P &sign){
			if(general_basis_core<I,P>::nt<=0){
				return s;
			}
			// invert the flagged bits, then run the network backwards
			return benes_bwd(&benes_maps[n_map],s^invs[n_map]);
		}

		// Vectorized in-place version of map_state; meant to be called from
		// inside an existing OpenMP parallel region (bare `omp for`).
		void map_state(I s[],npy_intp M,int n_map,P sign[]){
			if(general_basis_core<I,P>::nt<=0){
				return;
			}
			const tr_benes<I> * benes_map = &benes_maps[n_map];
			const I inv = invs[n_map];

			#pragma omp for schedule(static)
			for(npy_intp i=0;i<M;i++){
				s[i] = benes_bwd(benes_map,s[i]^inv);
			}
		}

		// Single conserved quantity: the total occupation (popcount of s).
		std::vector<int> count_particles(const I s){
			std::vector<int> v(1);
			v[0] = bit_count(s,general_basis_core<I,P>::N);
			return v;
		}

		// I map_state(I s,int n_map,int &sign){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return s;
		// 	}
		// 	const int n = general_basis_core<I,P>::N;
		// 	return hcb_map_bits(s,&general_basis_core<I,P>::maps[n_map*n],n);
		// }

		// void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return;
		// 	}
		// 	const int n = general_basis_core<I,P>::N;
		// 	const int * map = &general_basis_core<I,P>::maps[n_map*n];
		// 	#pragma omp for schedule(static,1)
		// 	for(npy_intp i=0;i<M;i++){
		// 		s[i] = hcb_map_bits(s[i],map,n);
		// 	}
		// }

		// Next state with the same particle number: Gosper's hack, returning
		// the next-larger integer with the same popcount (nns unused; s==0
		// is returned unchanged to avoid dividing by zero).
		I inline next_state_pcon(const I s,const I nns){
			if(s==0){return s;}
			I t = (s | (s - 1)) + 1;
			return t | ((((t & (0-t)) / (s & (0-s))) >> 1) - 1);
		}

		// Apply the operator string opstr (acting at sites indx[]) to state r,
		// accumulating the matrix element in m.  Supported single-site ops:
		// 'z' (Sz: +-1/2), 'n' (occupation), 'x', 'y', '+', '-', 'I'.
		// Returns 0 on success, -1 on an unknown operator character.
		int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
			const I s = r;
			const I one = 1;

			for(int j=n_op-1;j>-1;j--){ // rightmost operator acts first
				const int ind = general_basis_core<I,P>::N-indx[j]-1;
				const I b = (one << ind);
				const bool a = (bool)((r >> ind)&one); // site occupied?
				const char op = opstr[j];
				switch(op){
					case 'z':
						m *= (a?0.5:-0.5);
						break;
					case 'n':
						m *= (a?1:0);
						break;
					case 'x':
						r ^= b;
						m *= 0.5;
						break;
					case 'y':
						m *= (a?std::complex<double>(0,0.5):std::complex<double>(0,-0.5));
						r ^= b;
						break;
					case '+':
						m *= (a?0:1); // annihilated if already occupied
						r ^= b;
						break;
					case '-':
						m *= (a?1:0); // annihilated if empty
						r ^= b;
						break;
					case 'I':
						break;
					default:
						return -1;
				}
				// matrix element vanished: restore the input state and stop
				if(std::abs(m)==0){
					r = s;
					break;
				}
			}

			return 0;
		}
};
}
#endif
|
ParallelOpenMP.h | #pragma once
#include <ATen/ATen.h>
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
// Run f(sub_begin, sub_end) over [begin, end) in parallel, splitting the
// range evenly across the OpenMP team.  Parallelism is skipped when already
// inside a parallel region or when the range is smaller than grain_size.
// The first exception thrown by any chunk is captured (first-wins via the
// atomic_flag) and rethrown on the calling thread after the region ends.
// Without OpenMP support, f is simply invoked once on the whole range.
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  TORCH_CHECK(grain_size >= 0);
  if (begin >= end) {
    return;
  }
#ifdef _OPENMP
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
#pragma omp parallel if (!omp_in_parallel() && ((end - begin) >= grain_size))
  {
    // one contiguous chunk per thread (ceil division so all work is covered)
    int64_t num_threads = omp_get_num_threads();
    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup((end - begin), num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {
      try {
        f(begin_tid, std::min(end, chunk_size + begin_tid));
      } catch (...) {
        // record only the first exception; exceptions must not escape an
        // OpenMP region
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
  }
  if (eptr) {
    std::rethrow_exception(eptr);
  }
#else
  f(begin, end);
#endif
}
// Parallel reduction over [begin, end): each grain_size-sized chunk is
// reduced with f(chunk_begin, chunk_end, ident) into its own slot of
// `results` (no synchronization needed), then the partials are folded
// sequentially with sf.  Falls back to a single serial call when already
// in a parallel region or only one thread is available.  Exceptions are
// captured first-wins and rethrown after the loop, as in parallel_for.
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf) {
  TORCH_CHECK(grain_size >= 0);
  if (begin >= end) {
    return ident;
  } else if (in_parallel_region() || get_num_threads() == 1) {
    return f(begin, end, ident);
  } else {
    const int64_t num_results = divup((end - begin), grain_size);
    std::vector<scalar_t> results(num_results);
    scalar_t* results_data = results.data();
    std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
    std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
    for (int64_t id = 0; id < num_results; id++) {
      // chunk id covers [i, i + min(end - i, grain_size))
      int64_t i = begin + id * grain_size;
      try {
        results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
    if (eptr) {
      std::rethrow_exception(eptr);
    }
    // sequential combine keeps sf free of thread-safety requirements
    scalar_t result = ident;
    for (auto partial_result : results) {
      result = sf(result, partial_result);
    }
    return result;
  }
}
} // namespace at
|
GB_binop__isne_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int32)
// A*D function (colscale): GB (_AxD__isne_int32)
// D*A function (rowscale): GB (_DxB__isne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int32)
// C=scalar+B GB (_bind1st__isne_int32)
// C=scalar+B' GB (_bind1st_tran__isne_int32)
// C=A+scalar GB (_bind2nd__isne_int32)
// C=A'+scalar GB (_bind2nd_tran__isne_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense, cij = (aij != bij).
// Auto-generated: the body is the shared template specialized by the
// GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix using the ISNE
// operator.  Returns GrB_NO_VALUE when this operator is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix using the ISNE operator.
GrB_Info GB (_Cdense_accumb__isne_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above already returns; a quirk
    // of the code generator, left intact because this file is generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// ISNE results into C->x.
GrB_Info GB (_AxD__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the
// ISNE results into C->x.
GrB_Info GB (_DxB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with the ISNE operator.
// When is_eWiseUnion is true, alpha/beta supply the values used where only
// one of A or B has an entry (GxB_eWiseUnion semantics).
GrB_Info GB (_AaddB__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are read only in the eWiseUnion case
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse, using the ISNE operator.
GrB_Info GB (_AemultB_08__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for ISNE (it is commutative), so the
// flipxy argument never needs a flipped template instantiation here.
GrB_Info GB (_AemultB_02__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated wrapper for C<M> = A.*B with M sparse/hyper and A, B
// bitmap/full; the loop body comes from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,                 // mask, sparse or hypersparse
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated wrapper for the bitmap case of eWiseMult; the loop body comes
// from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__isne_int32)
(
GrB_Matrix C,                       // output matrix, bitmap
const int ewise_method,
const GrB_Matrix M,                 // optional mask
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = (x != b(k)) to every entry of B, with the scalar x bound to the
// first operand.  Cx and Bx may alias; each output element depends only on
// the matching input element, so the parallel loop is safe.
GrB_Info GB (_bind1st__isne_int32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int32_t *Cx = (int32_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only entries present in the bitmap Bb are computed
if (GBB (Bb, k))
{
int32_t bk = GBX (Bx, k, false) ;
Cx [k] = (x != bk) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = (a(k) != y) to every entry of A, with the scalar y bound to the
// second operand.  Cx and Ax may alias; element-wise independence makes the
// parallel loop safe.
GrB_Info GB (_bind2nd__isne_int32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((int32_t *) y_input)) ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t *Cx = (int32_t *) Cx_output ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap Ab are computed
if (GBB (Ab, k))
{
int32_t ak = GBX (Ax, k, false) ;
Cx [k] = (ak != y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry action used by GB_unop_transpose.c:
// here it applies cij = (x != aij) while transposing A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first operand.
GrB_Info GB (_bind1st_tran__isne_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any generated code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry action used by GB_unop_transpose.c:
// here it applies cij = (aij != y) while transposing A.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// to the second operand.
GrB_Info GB (_bind2nd_tran__isne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zpotrs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_potrs
*
* Solves a system of linear equations A * X = B with a Hermitian positive
* definite complex matrix A, using the Cholesky factorization
* A = U^H*U or A = L*L^H computed by plasma_zpotrf.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] pA
* The triangular factor U or L from the Cholesky
* factorization A = U^H*U or A = L*L^H, computed by
* plasma_zpotrf.
* Remark: If out-of-place layout translation is used, the
* matrix A can be considered as input, however if inplace
* layout translation is enabled, the content of A will be
* reordered for computation and restored before exiting the
* function.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zpotrs
* @sa plasma_cpotrs
* @sa plasma_dpotrs
* @sa plasma_spotrs
* @sa plasma_zpotrf
*
******************************************************************************/
int plasma_zpotrs(plasma_enum_t uplo,
                  int n, int nrhs,
                  plasma_complex64_t *pA, int lda,
                  plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // Quick return.  There is nothing to solve if the system is empty
    // (n == 0) or there are no right-hand sides (nrhs == 0).  The previous
    // test, imax(n, nrhs) == 0, only fired when BOTH were zero, so a call
    // with nrhs == 0 still paid for descriptor creation and layout
    // translation (cf. LAPACK ZPOTRS, which returns when N==0 or NRHS==0).
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaComplexDouble, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence; on failure release the descriptors allocated
    // above.  (The original code assigned but never checked this retval.)
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request; same cleanup discipline as above.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zpotrs(uplo, A, B, &sequence, &request);

        // Translate back to LAPACK layout (only B holds the solution).
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_potrs
*
* Solves a system of linear equations using previously
* computed Cholesky factorization.
* Non-blocking tile version of plasma_zpotrs().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* The triangular factor U or L from the Cholesky factorization
* A = U^H*U or A = L*L^H, computed by plasma_zpotrf.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zpotrs
* @sa plasma_omp_zpotrs
* @sa plasma_omp_cpotrs
* @sa plasma_omp_dpotrs
* @sa plasma_omp_spotrs
* @sa plasma_omp_zpotrf
*
******************************************************************************/
void plasma_omp_zpotrs(plasma_enum_t uplo, plasma_desc_t A, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the sequence and request handles before anything else: every
    // other error path reports failure through plasma_request_fail(), which
    // needs both pointers to be valid.  The original code ran these checks
    // last, so earlier error paths passed a possibly-NULL sequence/request
    // into plasma_request_fail().
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Two triangular solves in place in B:
    // upper: solve U^H * Y = B, then U * X = Y;
    // lower: solve L * Y = B, then L^H * X = Y.
    plasma_pztrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);

    plasma_pztrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
|
mkldnn_common.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2019 by Contributors
* \file mkldnn_common.h
* \brief Common header file for MKLDNN backend subgraph
* \author Ciyong Chen
*/
#ifndef MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#define MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#if MXNET_USE_MKLDNN == 1
#include <vector>
namespace mxnet {
namespace op {
template <typename DType>
static std::vector<float> GetWeightScales(const NDArray &weight, const NDArray *bias,
                                          const float data_scale, bool weight_channelwise_scale) {
  // Derive int8 quantization scales for the weight tensor: either one scale
  // per output channel, or (when weight_channelwise_scale is false) a single
  // tensor-wide scale together with the observed global min and max.
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  const DType *w_data = weight.data().dptr<DType>();
  const DType *b_data = bias ? bias->data().dptr<DType>() : nullptr;
  const auto wshape = weight.shape();
  const size_t num_channels = wshape[0];
  const size_t per_channel = wshape.ProdShape(1, wshape.ndim());

  // Per-channel min/max scan over the weight values.
  std::vector<DType> ch_min(num_channels, MaxValue<DType>());
  std::vector<DType> ch_max(num_channels, MinValue<DType>());
  for (int c = 0; c < static_cast<int>(num_channels); ++c) {
    const DType *row = w_data + c * per_channel;
    for (size_t k = 0; k < per_channel; ++k) {
      const DType v = row[k];
      if (v < ch_min[c]) ch_min[c] = v;
      if (v > ch_max[c]) ch_max[c] = v;
    }
  }

  std::vector<float> scales;
  if (weight_channelwise_scale) {
    scales.resize(num_channels);
#pragma omp parallel for num_threads(nthreads)
    for (int c = 0; c < static_cast<int>(num_channels); ++c) {
      float scale = GetQuantizeScale(mshadow::kInt8, ch_min[c], ch_max[c]);
      if (b_data && b_data[c]) {
        // avoid overflow on bias
        // TODO(zhennan): mkldnn has bug to handle INT_MAX in bias, so set the maximum value of bias
        // to INT_MAX / 2.
        float scale_max =
            static_cast<float>(b_data[c] > 0 ? MaxValue<int32_t>() : MinValue<int32_t>()) / 2 /
            b_data[c] / data_scale;
        scale = Min(scale, scale_max);
      }
      scales[c] = scale;
    }
  } else {
    // Collapse the per-channel extrema into one global range.
    DType total_min = ch_min[0];
    DType total_max = ch_max[0];
    for (size_t c = 1; c < num_channels; ++c) {
      if (ch_min[c] < total_min) total_min = ch_min[c];
      if (ch_max[c] > total_max) total_max = ch_max[c];
    }
    scales.resize(3);
    scales[0] = GetQuantizeScale(mshadow::kInt8, total_min, total_max);
    scales[1] = total_min;
    scales[2] = total_max;
  }
  return scales;
}
// Reorder (and, when scales are given, quantize) weight -- and optionally
// bias -- into the MKLDNN memory layouts given by weight_md/bias_md,
// replacing *weight and *bias with the converted arrays.  The reorder
// primitives are queued on the MKLDNN stream; they execute when the stream
// is submitted (here when submit==true, otherwise by the caller later).
static void ConvertWeightBias2MKLDNN(NDArray *weight, NDArray *bias, bool has_bias,
const mkldnn::memory::desc &weight_md,
const mkldnn::memory::desc *bias_md,
const int num_group, float data_scale,
const std::vector<float> &weight_scales,
const bool submit = true) {
MKLDNNStream *stream = MKLDNNStream::Get();
const auto new_weight = NDArray(weight_md);
const auto conv_weights_memory = new_weight.GetMKLDNNData();
mkldnn::primitive_attr weight_attr;
if (weight_scales.size()) {
// mask 0 = one scale for the whole tensor; mask 1 = per-channel scales
const int weight_mask = (weight_scales.size()) == 1 ? 0 : 1;
weight_attr.set_output_scales(weight_mask, weight_scales);
}
auto default_weights_memory = GetWeights(*weight, num_group);
if (default_weights_memory == nullptr) default_weights_memory = weight->GetMKLDNNData();
const auto weight_reorder_pd =
mkldnn::reorder::primitive_desc(*default_weights_memory, *conv_weights_memory, weight_attr);
MKLDNNStream::Get()->RegisterPrimArgs(
mkldnn::reorder(weight_reorder_pd),
{{MKLDNN_ARG_FROM, *default_weights_memory}, {MKLDNN_ARG_TO, *conv_weights_memory}});
NDArray new_bias;
if (has_bias && data_scale) {
// bias must be scaled by weight_scale * data_scale to stay consistent
// with the quantized weight and input
std::vector<float> bias_scales(weight_scales.size());
for (size_t c = 0; c < weight_scales.size(); ++c) {
bias_scales[c] = weight_scales[c] * data_scale;
}
new_bias = NDArray(*bias_md);
const auto conv_bias_memory = new_bias.GetMKLDNNData();
const int bias_mask = (bias_scales.size()) == 1 ? 0 : 1;
mkldnn::primitive_attr bias_attr;
bias_attr.set_output_scales(bias_mask, bias_scales);
auto bias_weights_memory = bias->GetMKLDNNData();
const auto bias_reorder_pd =
mkldnn::reorder::primitive_desc(*bias_weights_memory, *conv_bias_memory, bias_attr);
MKLDNNStream::Get()->RegisterPrimArgs(
mkldnn::reorder(bias_reorder_pd),
{{MKLDNN_ARG_FROM, *bias_weights_memory}, {MKLDNN_ARG_TO, *conv_bias_memory}});
}
// the queued reorders must run before the converted arrays are published
if (submit)
stream->Submit();
*weight = new_weight;
if (has_bias && data_scale) *bias = new_bias;
}
} // namespace op
} // namespace mxnet
#endif // if MXNET_USE_MKLDNN == 1
#endif // MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
|
DRB045-doall1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Simplest one dimension array computation
*/
#include <omp.h>
// DataRaceBench kernel: three loops over a global array.  This benchmark is
// deliberately race-free ("orig-no"); the code pattern must stay exactly as
// written so race detectors are evaluated against the intended pattern.
int a[100];
int main()
{
int i;
// Each iteration writes a distinct element a[i] and i is private,
// so this parallel loop carries no data race.
#pragma omp parallel for private (i)
for (i = 0; i <= 99; i += 1) {
a[i] = i;
}
// Each iteration reads and writes only its own a[i]: still race-free.
#pragma omp parallel for private (i)
for (i = 0; i <= 99; i += 1) {
a[i] = a[i] + 1;
}
// Sequential print of the result (expected: 1..100).
for (i = 0; i <= 99; i += 1) {
printf("%d\n",a[i]);
}
return 0;
}
|
GB_subassign_09.c | //------------------------------------------------------------------------------
// GB_subassign_09: C(I,J)<M,repl> = scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 09: C(I,J)<M,repl> = scalar ; using S
// M: present
// Mask_comp: false
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed
// C: not bitmap or full
#include "GB_unused.h"
#include "GB_subassign_methods.h"
// C(I,J)<M,repl> = scalar, using the symbolic pattern S = C(I,J).
// Phase 1 deletes/updates existing entries of C (creating "zombies" for
// deletions) and counts how many new entries are needed; phase 2 inserts
// those new entries as pending tuples.  The two phases are driven by the
// GB_* task macros, whose expansion order is load-bearing.
GrB_Info GB_subassign_09
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ; // C must not be bitmap
GB_GET_MASK ;
GB_GET_SCALAR ;
GB_GET_S ;
// Method 09 applies no accumulator: matching entries are overwritten.
GrB_BinaryOp accum = NULL ;
//--------------------------------------------------------------------------
// Method 09: C(I,J)<M,repl> = scalar ; using S
//--------------------------------------------------------------------------
// Time: Optimal. All entries in M+S must be examined. All entries in S
// are modified: if M(i,j)=1 then S(i,j) is used to write to the
// corresponding entry in C. If M(i,j) is not present, or zero, then the
// entry in C is cleared (because of C_replace). If S(i,j) is not present,
// and M(i,j)=1, then the scalar is inserted into C. The only case that
// can be skipped is if neither S nor M is present. As a result, this
// method need not traverse all of IxJ. It can limit its traversal to the
// pattern of M+S.
// Method 09 and Method 11 are very similar.
//--------------------------------------------------------------------------
// Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (M_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all M+S
GB_SUBASSIGN_TWO_SLICE (M, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (M_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: M is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iM_start:iM_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
int64_t pM_start = j * Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
//--------------------------------------------------------------
for (int64_t iM = iM_start ; iM < iM_end ; iM++)
{
int64_t pM = pM_start + iM ;
// Sfound: S(iM,j) is present; mij: mask permits a write here
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
if (Sfound && !mij)
{
// S (i,j) is present but M (i,j) is false
// ----[C A 0] or [X A 0]-------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (!Sfound && mij)
{
// S (i,j) is not present, M (i,j) is true
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && mij)
{
// S (i,j) present and M (i,j) is true
GB_C_S_LOOKUP ;
// ----[C A 1] or [X A 1]-------------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_scalar ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: M is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get S(:,j) and M(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and M(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and M (:,j) have entries
while (pS < pS_end && pM < pM_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iS < iM)
{
// S (i,j) is present but M (i,j) is not
// ----[C A 0] or [X A 0]-------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (iM < iS)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (M) ;
}
else
{
// both S (i,j) and M (i,j) present
GB_C_S_LOOKUP ;
if (GB_mcast (Mx, pM, msize))
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): copy A, no accum
// [X A 1]: action: ( undelete ): zombie lives
GB_noaccum_C_A_1_scalar ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): now zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
GB_NEXT (M) ;
}
}
// while list S (:,j) has entries. List M (:,j) exhausted.
while (pS < pS_end)
{
// S (i,j) is present but M (i,j) is not
// ----[C A 0] or [X A 0]-----------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
// while list M (:,j) has entries. List S (:,j) exhausted.
while (pM < pM_end)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (M) ;
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (M_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: M is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iM_start:iM_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
int64_t pM_start = j * Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iM = iM_start ; iM < iM_end ; iM++)
{
int64_t pM = pM_start + iM ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
if (!Sfound && mij)
{
// S (i,j) is not present, M (i,j) is true
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: M is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get S(:,j) and M(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and M(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and M (:,j) have entries
while (pS < pS_end && pM < pM_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iS < iM)
{
// S (i,j) is present but M (i,j) is not
GB_NEXT (S) ;
}
else if (iM < iS)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
GB_NEXT (M) ;
}
else
{
// both S (i,j) and M (i,j) present
GB_NEXT (S) ;
GB_NEXT (M) ;
}
}
// while list M (:,j) has entries. List S (:,j) exhausted.
while (pM < pM_end)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iM = GBI (Mi, pM, Mvlen) ;
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
GB_NEXT (M) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
GB_binop__pow_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_uint64
// A.*B function (eWiseMult): GB_AemultB__pow_uint64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_uint64
// C+=b function (dense accum): GB_Cdense_accumb__pow_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_uint64
// C=scalar+B GB_bind1st__pow_uint64
// C=scalar+B' GB_bind1st_tran__pow_uint64
// C=A+scalar GB_bind2nd__pow_uint64
// C=A'+scalar GB_bind2nd_tran__pow_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_pow_uint64 (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_pow_uint64 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT64 || GxB_NO_POW_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with no mask and no
// accumulator.  All of the work is done by the template file, which is
// specialized by the GB_* macros defined above (pow operator, uint64_t).
GrB_Info GB_Cdense_ewise3_noaccum__pow_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out (see GB_DISABLE above); returning
// GrB_NO_VALUE tells the caller to use the generic case instead
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// pow operator.  kfirst_slice/klast_slice/pstart_slice describe the
// partition of B across the ntasks parallel tasks (computed by the
// caller); the template consumes them.
GrB_Info GB_Cdense_accumB__pow_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the pow
// operator.  p_bwork points to the scalar, passed as untyped GB_void.
GrB_Info GB_Cdense_accumb__pow_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// This rowscale kernel is not generated for the pow operator, so the
// whole definition is disabled with #if 0.  Fixed the placeholder name:
// the generator writes "(none)" for unavailable kernels (see the
// colscale stub above); "(node)" was a typo.
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B via GB_add_template.c, where the
// pattern of C is the union of A and B.  TaskList and the C_to_*
// mapping arrays form the parallel schedule computed by the caller and
// are consumed inside the template.
GrB_Info GB_AaddB__pow_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B via GB_emult_template.c, where the
// pattern of C is the intersection of A and B.  Same scheduling inputs
// as GB_AaddB above.
GrB_Info GB_AemultB__pow_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = pow (x, Bx [k]) for all k: apply the binary pow operator
// with the scalar x bound as its first argument.  The untyped pointers
// are viewed as uint64_t arrays; Cx and Bx may alias (the update is
// element-wise in place, so aliasing is harmless).
GrB_Info GB_bind1st__pow_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Bx = (uint64_t *) Bx_input ;
const uint64_t x = (*((const uint64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = GB_pow_uint64 (x, Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = pow (Ax [k], y) for all k: apply the binary pow operator
// with the scalar y bound as its second argument.  The untyped pointers
// are viewed as uint64_t arrays; Cx and Ax may alias.
GrB_Info GB_bind2nd__pow_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
const uint64_t y = (*((const uint64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = GB_pow_uint64 (Ax [k], y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_uint64 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply pow with the scalar x bound as
// the first argument.  GB_unop_transpose.c consumes the GB_CAST_OP
// macro defined just above, plus GB_ATYPE.
GrB_Info GB_bind1st_tran__pow_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// note: the preprocessor runs regardless of the returns above; this
// restores the file-wide GB_ATYPE for anything that follows
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_uint64 (aij, y) ; \
}
// C = op (A', y): transpose A and apply pow with the scalar y bound as
// the second argument.  GB_unop_transpose.c consumes the GB_CAST_OP
// macro defined just above.
GrB_Info GB_bind2nd_tran__pow_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
solution-omp.h | /*
author: mark@mkmark.net
time: O()
space: O()
Runtime:
Memory Usage:
*/
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <execution>
class Solution
{
public:
    // LeetCode 2121 "Intervals Between Identical Elements":
    //   intervals[i] = sum over all j with arr[j] == arr[i] of |i - j|.
    //
    // For each distinct value, collect its (naturally sorted) index
    // list.  Within a list of m indices, the first answer is computed
    // directly, and each later answer is derived from the previous one
    // in O(1):  f(j) = f(j-1) + (i_j - i_{j-1}) * (2*j - m).
    // Each value's list is processed by an independent OpenMP task; the
    // tasks write disjoint entries of `intervals`, so no synchronization
    // is needed.
    //
    // Fixes vs. the previous revision:
    //  * repaired the mojibake "¤t_indices_map" (a mangled
    //    "&current_indices_map" reference) that broke compilation;
    //  * removed the unused indices_in_indices_map bookkeeping;
    //  * `2*j - indices.size()` mixed int with size_t, so the
    //    subtraction underflowed to a huge unsigned value whenever
    //    2*j < m; all index arithmetic is now done in long long;
    //  * each task now takes the index list by const reference instead
    //    of copying it.
    std::vector<long long> getDistances(std::vector<int> &arr)
    {
        const int n = static_cast<int>(arr.size());
        std::vector<long long> intervals(n);
        std::unordered_map<int, std::vector<int>> indices_map;
        for (int i = 0; i < n; ++i)
            indices_map[arr[i]].push_back(i);
#pragma omp parallel
        {
#pragma omp single
            {
                for (auto it = indices_map.begin(); it != indices_map.end(); ++it)
#pragma omp task firstprivate(it)
                {
                    const std::vector<int> &indices = it->second;
                    const long long m = static_cast<long long>(indices.size());
                    for (long long j = 0; j < m; ++j)
                    {
                        const long long i = indices[static_cast<std::size_t>(j)];
                        if (j == 0)
                        {
                            // First occurrence: every other index is larger,
                            // so the answer is (sum of indices) - i*m.
                            long long sum = 0;
                            for (int k : indices)
                                sum += k;
                            intervals[static_cast<std::size_t>(i)] = sum - i * m;
                        }
                        else
                        {
                            // Moving right by delta adds delta for each of the
                            // j smaller indices and subtracts delta for each of
                            // the m-j larger ones.
                            const long long previous_i =
                                indices[static_cast<std::size_t>(j) - 1];
                            const long long delta = i - previous_i;
                            intervals[static_cast<std::size_t>(i)] =
                                intervals[static_cast<std::size_t>(previous_i)] +
                                delta * (2 * j - m);
                        }
                    }
                }
            }
        }
        return intervals;
    }
};
GB_unop__exp_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp_fc64_fc64)
// op(A') function: GB (_unop_tran__exp_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = cexp (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cexp (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = cexp (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cexp (Ax [p]) for all entries: apply the complex exponential
// to a full or bitmap GxB_FC64_t array.  If Ab is NULL, A is full and
// every position is processed; otherwise A is bitmap and positions with
// Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__exp_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a bulk memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = cexp (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = cexp (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cexp (A'): transpose A and apply the complex exponential.  All of
// the work is in GB_unop_transpose.c, which consumes the GB_CAST_OP
// macro defined above; Workspaces/A_slice are the parallel partition
// computed by the caller.
GrB_Info GB (_unop_tran__exp_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint64
// op(A') function: GB_tran__abs_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = abs (Ax [p]) for all p.  For uint64 the abs operator is the
// identity (see "unaryop: cij = aij" above), so GB_CAST_OP reduces to a
// parallel element-wise copy.
GrB_Info GB_unop__abs_uint64_uint64
(
uint64_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (A'): transpose A, typecast, and apply the (identity) abs
// operator.  The work is in GB_unaryop_transpose.c, driven by the
// Rowcounts/Iter/A_slice partition computed by the caller.
GrB_Info GB_tran__abs_uint64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
chemm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zhemm.c, normal z -> c, Fri Sep 28 17:38:06 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_hemm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha \times A \times B + \beta \times C \f]
* or
* \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* where alpha and beta are scalars, A is a Hermitian matrix and B and
* C are m-by-n matrices.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the Hermitian matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the Hermitian matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* Hermitian matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* Hermitian matrix A is to be referenced.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft,
* and is n otherwise. Only the uplo triangular part is referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in] pB
* B is an ldb-by-n matrix, where the leading m-by-n part of
* the array B must contain the matrix B.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the array is overwritten by the m-by-n updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_chemm
* @sa plasma_chemm
*
******************************************************************************/
// LAPACK-layout entry point: validates arguments, converts A, B and C
// to tile layout, runs the tile-async chemm, and converts C back.
// Returns PlasmaSuccess or a negative argument index / error code.
int plasma_chemm(plasma_enum_t side, plasma_enum_t uplo,
int m, int n,
plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
plasma_complex32_t *pB, int ldb,
plasma_complex32_t beta, plasma_complex32_t *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((side != PlasmaLeft) && (side != PlasmaRight)) {
plasma_error("illegal value of side");
return -1;
}
if ((uplo != PlasmaLower) && (uplo != PlasmaUpper)) {
plasma_error("illegal value of uplo");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
// A is am-by-am: its order depends on which side it multiplies from.
int am;
if (side == PlasmaLeft) {
am = m;
}
else {
am = n;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldb < imax(1, m)) {
plasma_error("illegal value of ldb");
return -9;
}
if (ldc < imax(1, m)) {
plasma_error("illegal value of ldc");
return -12;
}
// quick return
if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_symm(plasma, PlasmaComplexFloat, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.  Each descriptor owns its tile storage, so any
// failure path below must destroy the descriptors created so far.
plasma_desc_t A;
plasma_desc_t B;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
am, am, 0, 0, am, am, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
m, n, 0, 0, m, n, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
m, n, 0, 0, m, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
return retval;
}
// Initialize sequence.
// NOTE(review): the return values of plasma_sequence_init() and
// plasma_request_init() are not checked — presumably they cannot
// fail; confirm against their definitions.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
// All work is submitted by the master thread; the implicit barrier at
// the end of the parallel region waits for every task to finish.
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
plasma_omp_cge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_chemm(side, uplo,
alpha, A,
B,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
* @ingroup plasma_hemm
*
* Performs Hermitian matrix multiplication.
* Non-blocking tile version of plasma_chemm().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the Hermitian matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the Hermitian matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* Hermitian matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* Hermitian matrix A is to be referenced.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
*******************************************************************************
*
* @sa plasma_chemm
* @sa plasma_omp_chemm
*
******************************************************************************/
// Tile-async chemm: validates arguments and dispatches the parallel
// worker.  Errors are reported through plasma_request_fail() rather
// than a return value (the function is void); may return before the
// computation completes.
void plasma_omp_chemm(plasma_enum_t side, plasma_enum_t uplo,
plasma_complex32_t alpha, plasma_desc_t A,
plasma_desc_t B,
plasma_complex32_t beta, plasma_desc_t C,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((side != PlasmaLeft) &&
(side != PlasmaRight)) {
plasma_error("illegal value of side");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((uplo != PlasmaLower) &&
(uplo != PlasmaUpper)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// NOTE(review): for A the request is failed before the error is
// logged, the reverse of the B and C checks below — harmless but
// inconsistent.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(C) != PlasmaSuccess) {
plasma_error("invalid C");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// NOTE(review): when sequence is NULL it is still passed to
// plasma_request_fail() — presumably that function tolerates a NULL
// sequence; confirm.
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || A.n == 0) && beta == 1.0))
return;
// Call the parallel function.
plasma_pchemm(side, uplo,
alpha, A,
B,
beta, C,
sequence, request);
}
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
% size_t analyzeImage(Image *images,const int argc,
% char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  analyzeImage() computes the mean, standard deviation, kurtosis and
  skewness of the brightness and saturation of each frame in the image
  list and stores the results as "filter:brightness:*" and
  "filter:saturation:*" image properties.  Always returns
  MagickImageFilterSignature.

  Fix: the OpenMP row loop previously accumulated into shared sums
  (brightness_sum_x .., saturation_sum_x .., area) and wrote the shared
  hue/saturation/brightness temporaries from every thread — a data race
  that corrupted the statistics whenever OpenMP was enabled.  The sums
  are now combined with a reduction clause and the HSB temporaries are
  loop-local.
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    MagickBooleanType
      status;

    ssize_t
      y;

    /*
      Reset the raw-moment accumulators for this frame.
    */
    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(status) \
      reduction(+:area,brightness_sum_x,brightness_sum_x2, \
        brightness_sum_x3,brightness_sum_x4,saturation_sum_x, \
        saturation_sum_x2,saturation_sum_x3,saturation_sum_x4)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      double
        brightness,  /* thread-private HSB components */
        hue,
        saturation;

      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p),
          &hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;
    /*
      Convert the raw moments into mean/std-dev/kurtosis/skewness and
      attach them as image properties.
    */
    brightness_mean=brightness_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    saturation_mean=saturation_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
tasktest.c | #include<stdio.h>
#include<unistd.h>
#include<limits.h>
#include<omp.h>
/*
 * Demonstrates OpenMP task overlap: one thread in a two-thread team
 * creates three 5-second tasks inside a single region; both threads
 * execute tasks, so elapsed time is ~10s instead of ~15s serial.
 *
 * Fixes: argv was declared `char* *argv[]` (i.e. char***), which is not
 * a valid main signature; the timer was initialized twice; main lacked
 * an explicit return.
 */
int main(int argc, char *argv[])
{
    (void)argc;
    (void)argv;
    double start = omp_get_wtime();  /* single timer initialization */
#pragma omp parallel num_threads(2)
    {
#pragma omp single
        {
#pragma omp task
            {
                sleep(5);
            }
#pragma omp task
            {
                sleep(5);
            }
#pragma omp task
            {
                sleep(5);
            }
        }
    }
    printf("finish 1 elapsed time=%lf\n", omp_get_wtime() - start);
    return 0;
}
|
DRB001-antidep1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@64:10 vs. a[i]@64:5
*/
#include <stdio.h>
/*
 * DataRaceBench DRB001: a loop with a loop-carried anti-dependence,
 * parallelized on purpose to exhibit the documented data race.
 *
 * Fix: the `#pragma omp parallel for` had drifted onto the race-free
 * initialization loop; per the header comment ("Data race pair:
 * a[i+1]@64:10 vs. a[i]@64:5") and the upstream DRB001 benchmark, it
 * belongs on the second loop, where iteration i reads a[i+1] while
 * iteration i+1 writes it.
 */
int main(int argc, char* argv[])
{
  int i;
  int len = 1000;
  int a[1000];
  /* serial initialization: no dependence between iterations */
  for (i=0; i<len; i++)
    a[i]= i;
  /* intentional race: anti-dependence a[i] <- a[i+1] across iterations */
#pragma omp parallel for
  for (i=0;i< len -1 ;i++)
    a[i]=a[i+1]+1;
  printf ("a[500]=%d\n", a[500] );
  return 0;
}
|
Example_private.1.c | /*
* @@name: private.1c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
*/
#include <stdio.h>
#include <assert.h>
// OpenMP spec example: private(i) gives each thread an UNINITIALIZED
// private copy of i; firstprivate(j) gives each thread a private copy of j
// initialized from the shared j. The shared originals, reached through
// ptr_i/ptr_j, are never modified inside the parallel region.
int main()
{
int i, j;
int *ptr_i, *ptr_j;
i = 1;
j = 2;
ptr_i = &i;   // pointers capture the SHARED variables' addresses
ptr_j = &j;
#pragma omp parallel private(i) firstprivate(j)
{
i = 3;        // writes only this thread's private i
j = j + 2;    // private j becomes 4; shared j stays 2
assert (*ptr_i == 1 && *ptr_j == 2);  // dereferences see the shared originals
}
assert(i == 1 && j == 2);  // shared copies unchanged after the region
return 0;
}
|
GB_binop__plus_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp32)
// A*D function (colscale): GB (_AxD__plus_fp32)
// D*A function (rowscale): GB (_DxB__plus_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp32)
// C=scalar+B GB (_bind1st__plus_fp32)
// C=scalar+B' GB (_bind1st_tran__plus_fp32)
// C=A+scalar GB (_bind2nd__plus_fp32)
// C=A'+scalar GB (_bind2nd_tran__plus_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FP32 || GxB_NO_PLUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense float matrices; the loop body
// comes from the included template, specialized to cij = (aij + bij).
// NOTE(review): unlike the other kernels in this file there is no
// GB_DISABLE guard here (void return) -- presumably by generator design.
void GB (_Cdense_ewise3_accum__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense floats; returns GrB_NO_VALUE
// when this specialized kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse float matrix B into the dense matrix C,
// using the task slicing (B_ek_slicing) computed by the caller.
GrB_Info GB (_Cdense_accumB__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into the
// dense matrix C.
GrB_Info GB (_Cdense_accumb__plus_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns. Harmless
// artifact of the code generator; do not hand-edit this generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D,
// writing the float results directly into C->x via the colscale template.
GrB_Info GB (_AxD__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D,
// writing the float results directly into C->x via the rowscale template.
GrB_Info GB (_DxB__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M): union of the patterns of A
// and B with cij = aij + bij where both are present. The three ek_slicing
// workspaces are declared here for the template and released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__plus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked): intersection of the patterns of
// A and B with cij = aij + bij; delegates to the emult_01 meta template.
GrB_Info GB (_AemultB_01__plus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#M> = A.*B where A is sparse/hyper and B is bitmap/full.
// PLUS is commutative (GB_BINOP_FLIP is 0 in this file), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; Cp_kfirst and the M slicing drive the template's tasks.
GrB_Info GB (_AemultB_03__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B,
// delegated to the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__plus_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for every entry present in B (bind the scalar x as
// the first operand of PLUS). Skips positions absent from the bitmap Bb.
GrB_Info GB (_bind1st__plus_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Bx = (float *) Bx_input ;
    const float x = (*((float *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            float bij = Bx [p] ;
            Cx [p] = (x + bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A (bind the scalar y as
// the second operand of PLUS). Skips positions absent from the bitmap Ab.
GrB_Info GB (_bind2nd__plus_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    const float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            float aij = Ax [p] ;
            Cx [p] = (aij + y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A while applying z = x + aij to each entry,
// using the GB_CAST_OP macro above inside the transpose template.
GrB_Info GB (_bind1st_tran__plus_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A while applying z = aij + y to each entry,
// using the GB_CAST_OP macro above inside the transpose template.
GrB_Info GB (_bind2nd_tran__plus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rminus_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp64)
// A*D function (colscale): GB (_AxD__rminus_fp64)
// D*A function (rowscale): GB (_DxB__rminus_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp64)
// C=scalar+B GB (_bind1st__rminus_fp64)
// C=scalar+B' GB (_bind1st_tran__rminus_fp64)
// C=A+scalar GB (_bind2nd__rminus_fp64)
// C=A'+scalar GB (_bind2nd_tran__rminus_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP64 || GxB_NO_RMINUS_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense double matrices; the loop body
// comes from the included template, specialized to cij = (bij - aij).
// NOTE(review): no GB_DISABLE guard here (void return), unlike the other
// kernels in this file -- presumably by generator design.
void GB (_Cdense_ewise3_accum__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (ewise with the RMINUS op) where all three matrices are dense
// doubles; returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse double matrix B into the dense matrix C,
// using the task slicing (B_ek_slicing) computed by the caller.
GrB_Info GB (_Cdense_accumB__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into the
// dense matrix C.
GrB_Info GB (_Cdense_accumb__rminus_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns. Harmless
// artifact of the code generator; do not hand-edit this generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry of D,
// writing the double results directly into C->x via the colscale template.
GrB_Info GB (_AxD__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D,
// writing the double results directly into C->x via the rowscale template.
GrB_Info GB (_DxB__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M): union of the patterns of A
// and B with cij = bij - aij where both are present. The three ek_slicing
// workspaces are declared here for the template and released by
// GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__rminus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked) where C is sparse/hyper:
// intersection of the patterns with cij = bij - aij; delegates to the
// emult_08 meta template.
GrB_Info GB (_AemultB_08__rminus_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#M> = A.*B where A is sparse/hyper and B is bitmap/full.
// RMINUS needs no flip handling here (GB_BINOP_FLIP is 0 in this file:
// the flip was resolved by the caller choosing rminus vs minus), so only
// the unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; Cp_kfirst and the M slicing drive the template's tasks.
GrB_Info GB (_AemultB_04__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B,
// delegated to the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__rminus_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = rminus (x, Bx [p]) = Bx [p] - x, for every entry present in B.
// Skips positions absent from the bitmap Bb.
GrB_Info GB (_bind1st__rminus_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    const double x = (*((double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            double bij = GBX (Bx, p, false) ;
            Cx [p] = (bij - x) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rminus (Ax [p], y) = y - Ax [p], for every entry present in A.
// Skips positions absent from the bitmap Ab.
GrB_Info GB (_bind2nd__rminus_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    const double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            double aij = GBX (Ax, p, false) ;
            Cx [p] = (y - aij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// (rminus: op (x, aij) = aij - x)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A while applying z = aij - x to each entry,
// using the GB_CAST_OP macro above inside the transpose template.
GrB_Info GB (_bind1st_tran__rminus_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// (rminus: op (aij, y) = y - aij)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A while applying z = y - aij to each entry,
// using the GB_CAST_OP macro above inside the transpose template.
GrB_Info GB (_bind2nd_tran__rminus_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
update_ops_named_Z.c |
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif
//void Z_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Z_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Z_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Z_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);
// Apply the single-qubit Pauli-Z gate to a state vector of `dim`
// amplitudes, dispatching at compile time between the plain and SIMD
// builds and at run time between the serial and OpenMP variants.
void Z_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
//Z_gate_old_single(target_qubit_index, state, dim);
//Z_gate_old_parallel(target_qubit_index, state, dim);
//Z_gate_single(target_qubit_index, state, dim);
//Z_gate_single_simd(target_qubit_index, state, dim);
//Z_gate_single_unroll(target_qubit_index, state, dim);
//Z_gate_parallel(target_qubit_index, state, dim);
//return;
#ifdef _USE_SIMD
#ifdef _OPENMP
// below 2^13 amplitudes stay serial -- presumably a tuned cutoff where
// threading overhead outweighs the speedup; confirm against benchmarks
UINT threshold = 13;
if (dim < (((ITYPE)1) << threshold)) {
Z_gate_single_simd(target_qubit_index, state, dim);
}
else {
Z_gate_parallel_simd(target_qubit_index, state, dim);
}
#else
Z_gate_single_simd(target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
// same 2^13 serial/parallel cutoff as the SIMD build
UINT threshold = 13;
if (dim < (((ITYPE)1) << threshold)) {
Z_gate_single_unroll(target_qubit_index, state, dim);
}
else {
Z_gate_parallel_unroll(target_qubit_index, state, dim);
}
#else
Z_gate_single_unroll(target_qubit_index, state, dim);
#endif
#endif
}
// Serial Pauli-Z: negate every amplitude whose target-qubit bit is set.
// When the target is not qubit 0, the indices with that bit set come in
// adjacent pairs, so each iteration negates two amplitudes (the unroll).
void Z_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE half_dim = dim / 2;
    const ITYPE bit = (1ULL << target_qubit_index);
    const ITYPE lower_bits = bit - 1;
    const ITYPE upper_bits = ~lower_bits;
    ITYPE idx = 0;
    if (target_qubit_index == 0) {
        // lowest qubit: exactly the odd-indexed amplitudes are negated
        for (idx = 1; idx < dim; idx += 2) {
            state[idx] *= -1;
        }
    }
    else {
        for (idx = 0; idx < half_dim; idx += 2) {
            // splice a 1 into the target bit position of idx
            ITYPE basis = (idx & lower_bits) + ((idx & upper_bits) << 1) + bit;
            state[basis] *= -1;
            state[basis + 1] *= -1;
        }
    }
}
#ifdef _OPENMP
// OpenMP Pauli-Z: same computation as Z_gate_single_unroll, with the
// independent per-amplitude negations split across threads.
void Z_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE half_dim = dim / 2;
    const ITYPE bit = (1ULL << target_qubit_index);
    const ITYPE lower_bits = bit - 1;
    const ITYPE upper_bits = ~lower_bits;
    ITYPE idx = 0;
    if (target_qubit_index == 0) {
        // lowest qubit: exactly the odd-indexed amplitudes are negated
#pragma omp parallel for
        for (idx = 1; idx < dim; idx += 2) {
            state[idx] *= -1;
        }
    }
    else {
#pragma omp parallel for
        for (idx = 0; idx < half_dim; idx += 2) {
            // splice a 1 into the target bit position of idx
            ITYPE basis = (idx & lower_bits) + ((idx & upper_bits) << 1) + bit;
            state[basis] *= -1;
            state[basis + 1] *= -1;
        }
    }
}
#endif
#ifdef _USE_SIMD
/*
 * AVX variant: one 256-bit register holds two complex amplitudes
 * (4 doubles), so a single multiply by -1 negates both amplitudes
 * selected by each iteration.
 */
void Z_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE half_dim = dim / 2;
	const ITYPE target_mask = (1ULL << target_qubit_index);
	const ITYPE lower_mask = target_mask - 1;
	const ITYPE upper_mask = ~lower_mask;
	const __m256d neg = _mm256_set1_pd(-1.0);
	ITYPE it;
	if (target_qubit_index == 0) {
		/* Lowest qubit: scalar negation of the odd-indexed amplitudes. */
		for (it = 1; it < dim; it += 2) {
			state[it] = -state[it];
		}
	}
	else {
		for (it = 0; it < half_dim; it += 2) {
			ITYPE basis = (it & lower_mask) | ((it & upper_mask) << 1) | target_mask;
			double *ptr = (double *)(state + basis);
			_mm256_storeu_pd(ptr, _mm256_mul_pd(_mm256_loadu_pd(ptr), neg));
		}
	}
}
#ifdef _OPENMP
/*
 * OpenMP + AVX variant of the Pauli-Z gate: same index arithmetic as
 * Z_gate_single_simd with the loop distributed across threads.
 * Iterations touch disjoint 256-bit lanes, so no synchronization is needed.
 */
void Z_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE half_dim = dim / 2;
	const ITYPE target_mask = (1ULL << target_qubit_index);
	const ITYPE lower_mask = target_mask - 1;
	const ITYPE upper_mask = ~lower_mask;
	const __m256d neg = _mm256_set1_pd(-1.0);
	ITYPE it;
	if (target_qubit_index == 0) {
		/* Lowest qubit: scalar negation of the odd-indexed amplitudes. */
#pragma omp parallel for
		for (it = 1; it < dim; it += 2) {
			state[it] = -state[it];
		}
	}
	else {
#pragma omp parallel for
		for (it = 0; it < half_dim; it += 2) {
			ITYPE basis = (it & lower_mask) | ((it & upper_mask) << 1) | target_mask;
			double *ptr = (double *)(state + basis);
			_mm256_storeu_pd(ptr, _mm256_mul_pd(_mm256_loadu_pd(ptr), neg));
		}
	}
}
#endif
#endif
/*
void Z_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
ITYPE mask = (1ULL << target_qubit_index);
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE temp_index = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask;
state[temp_index] *= -1;
}
}
void Z_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
ITYPE mask = (1ULL << target_qubit_index);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE temp_index = insert_zero_to_basis_index(state_index, mask, target_qubit_index) ^ mask;
state[temp_index] *= -1;
}
}
void Z_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
ITYPE state_index = 0;
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask;
state[basis_index] *= -1;
}
}
#ifdef _OPENMP
void Z_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE mask = (1ULL << target_qubit_index);
const ITYPE mask_low = mask - 1;
const ITYPE mask_high = ~mask_low;
ITYPE state_index = 0;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&mask_low) + ((state_index&mask_high) << 1) + mask;
state[basis_index] *= -1;
}
}
#endif
*/
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
namespace mxnet {
namespace kvstore {
enum class CommandType {
kController, kStopServer, kSyncMode, kSetGradientCompression
};
enum class DataHandleType {
kDefaultPushPull, kCompressedPushPull, kRowSparsePushPull
};
/**
* \brief executor runs a function using the thread called \ref Start
*/
// Single-consumer work queue: one thread calls Start() and executes
// functions submitted (and waited on) by other threads via Exec().
class Executor {
public:
/**
* \brief Run the executor loop on the calling thread.
*
* Blocks on the queue, runs each submitted function, and fulfills its
* promise so the submitter's Exec() returns. A Block with an empty
* function (posted by Stop()) terminates the loop.
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
// release the lock while running user code so Exec() can enqueue
lk.unlock();
if (blk.f) {
blk.f(); blk.p->set_value();
} else {
// empty function == stop sentinel; signal the waiter, then exit
blk.p->set_value(); break;
}
lk.lock();
}
}
/**
* \brief function
*/
typedef std::function<void()> Func;
/**
* \brief let the thread called \ref Start to exec a function. threadsafe
*
* Blocks the caller until the executor thread has finished running func.
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
// wait for the executor thread to run func (or, for Stop, to exit)
fut.wait();
}
/**
* \brief stop the thread, threadsafe
*/
void Stop() {
// an empty Func is the stop sentinel understood by Start()
Exec(Func());
}
private:
// queue entry: the function to run plus the promise that signals completion
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;
std::mutex mu_;
std::condition_variable cond_;
};
// Parameter-server side of the distributed KVStore. Receives pushes from
// workers (dense, row_sparse, or gradient-compressed), merges them
// (sync mode) or applies the updater directly (async mode), and answers
// pull requests from the stored weights.
class KVStoreDistServer {
public:
KVStoreDistServer() {
using namespace std::placeholders;
ps_server_ = new ps::KVServer<float>(0);
// commands (stop / sync-mode / compression / controller) arrive on the
// SimpleApp handle; data push/pull arrives on the KV handle
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
sync_mode_ = false;
gradient_compression_ = std::make_shared<GradientCompression>();
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
}
~KVStoreDistServer() {
delete ps_server_;
}
// controller_ runs arbitrary frontend commands (see CommandHandle)
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
// updater_ merges a pushed gradient into the stored value for a key
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
* \brief blocked until received the command \a kSyncMode
*/
void Run() {
exec_.Start();
}
private:
// per-key merge state for sync mode: pending worker requests plus the
// partially aggregated value
struct MergeBuf {
std::vector<ps::KVMeta> request;
NDArray array;
};
// Dispatch a control command; non-builtin heads are forwarded to
// controller_ on the main (executor) thread, which python requires.
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
CommandType recved_type = static_cast<CommandType>(recved.head);
if (recved_type == CommandType::kStopServer) {
exec_.Stop();
} else if (recved_type == CommandType::kSyncMode) {
sync_mode_ = true;
} else if (recved_type == CommandType::kSetGradientCompression) {
gradient_compression_->DecodeParams(recved.body);
} else {
// this uses value 0 for message id from frontend
// let the main thread to execute ctrl, which is necessary for python
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
}
app->Response(recved);
}
// Route a data request to the handler matching its cmd field.
void DataHandleEx(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t>& req_data,
ps::KVServer<real_t>* server) {
DataHandleType recved_type = static_cast<DataHandleType>(req_meta.cmd);
if (recved_type == DataHandleType::kRowSparsePushPull) {
DataHandleRowSparse(req_meta, req_data, server);
} else if (recved_type == DataHandleType::kCompressedPushPull) {
DataHandleCompressed(req_meta, req_data, server);
} else {
DataHandleDefault(req_meta, req_data, server);
}
return;
}
// Sync-mode commit: once every worker has pushed for this key, apply
// updater_ (or plain copy) to *stored and respond to all queued requests
// with the updated value; otherwise just keep the merge buffer warm.
inline void ApplyUpdates(const ps::Key key, MergeBuf *merged, NDArray *stored,
ps::KVServer<real_t>* server) {
if (merged->request.size() == (size_t) ps::NumWorkers()) {
// let the main thread to execute updater_, which is necessary for python
if (updater_) {
exec_.Exec([this, key, merged, stored](){
CHECK(updater_);
updater_(key, merged->array, stored);
});
} else {
// if no updater, just copy
CopyFromTo(merged->array, stored);
}
if (log_verbose_) {
LOG(INFO) << "sync response to " << merged->request.size() << " workers";
}
stored->WaitToRead();
for (const auto& req : merged->request) {
ps::KVPairs<real_t> res;
size_t size = stored->shape().Size();
real_t* data = stored->data().dptr<real_t>();
// zero-copy view into stored (deleter disabled)
ps::SArray<real_t> vals(data, size, false);
res.keys.push_back(key);
res.vals = vals;
res.lens.push_back(size);
server->Response(req, res);
}
merged->request.clear();
} else {
merged->array.WaitToRead();
}
}
// keys[1..num_rows] encode master_key + row_id; write the decoded row ids
// into indices[0..num_rows-1]
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
const int64_t master_key, const int64_t num_rows) {
indices[0] = 0;
for (int64_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(keys[i]);
auto row_id = key - master_key;
indices[i - 1] = row_id;
}
}
// Push/pull of row_sparse values. keys[0] is the master key (lens[0]==0);
// the remaining keys identify the rows being pushed or pulled.
void DataHandleRowSparse(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t>& req_data,
ps::KVServer<real_t>* server) {
int master_key = DecodeKey(req_data.keys[0]);
auto num_rows = req_data.keys.size() - 1;
auto& stored = store_[master_key];
if (req_meta.push) {
CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
CHECK_EQ(req_data.lens[0], 0);
real_t* data = req_data.vals.data();
if (stored.is_none()) {
if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
// initialization
CHECK_GT(num_rows, 0) << "init with empty data is not supported";
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
size_t ds[] = {num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
CHECK_EQ(req_data.vals.size(), num_rows * unit_len);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
NDArray recved = NDArray(recv_blob, 0);
stored = NDArray(kRowSparseStorage, dshape, Context());
// fully initialize the row_sparse array: allocate, fill the row-index
// aux array with 0..nnr-1, then copy the dense payload
Engine::Get()->PushAsync(
[recved, stored](RunContext ctx, Engine::CallbackOnComplete on_complete) {
NDArray rsp = stored;
stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
using namespace mxnet::op;
nnvm::dim_t nnr = rsp.shape()[0];
MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
});
mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(),
recved.data().FlatTo1D<cpu, float>(), s);
on_complete();
}, recved.ctx(), {recved.var()}, {stored.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
// recved aliases req_data's buffer, which dies when this handler
// returns — block until the engine op has consumed it
stored.WaitToRead();
server->Response(req_meta);
return;
}
// synced push
if (sync_mode_) {
if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys;
auto& merged = merge_buf_[master_key];
if (merged.array.is_none()) {
merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
}
if (num_rows == 0) {
// reset to zeros
if (merged.request.size() == 0) {
merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
} else {
// nothing to aggregate
}
merged.request.push_back(req_meta);
ApplyUpdates(master_key, &merged, &stored, server);
return;
}
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
// data
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
// row_sparse NDArray
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
if (merged.request.size() == 0) {
CopyFromTo(recved, &merged.array, 0);
} else {
NDArray out(kRowSparseStorage, stored.shape(), Context());
std::vector<Engine::VarHandle> const_vars;
const_vars.push_back(recved.var());
const_vars.push_back(merged.array.var());
// accumulate row_sparse gradients
// TODO(haibin) override + operator for row_sparse NDArray
// instead of calling BinaryComputeRspRsp directly
using namespace mshadow;
Engine::Get()->PushAsync(
[recved, merged, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
{}, {}, {recved, merged.array}, {kWriteTo}, {out});
on_complete();
}, recved.ctx(), const_vars, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
CopyFromTo(out, &merged.array, 0);
}
merged.request.push_back(req_meta);
ApplyUpdates(master_key, &merged, &stored, server);
} else {
// async push
if (log_verbose_) LOG(INFO) << "async push: " << master_key;
if (num_rows == 0) {
server->Response(req_meta);
return;
}
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
// capturing locals by reference is safe here: Exec blocks until the
// lambda has finished running on the executor thread
exec_.Exec([this, master_key, &recved, &stored](){
CHECK(updater_);
updater_(master_key, recved, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else {
// pull
if (log_verbose_) LOG(INFO) << "pull: " << master_key;
ps::KVPairs<real_t> response;
if (num_rows == 0) {
std::vector<int> lens(req_data.keys.size(), 0);
response.keys = req_data.keys;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
return;
}
CHECK(!stored.is_none()) << "init " << master_key << " first";
auto shape = stored.shape();
auto unit_len = shape.ProdShape(1, shape.ndim());
const float* data = stored.data().dptr<float>();
auto len = unit_len * num_rows;
// concat values
response.vals.resize(len);
// NOTE(review): unsigned loop index with `omp parallel for` requires
// OpenMP >= 3.0 — confirm the minimum supported toolchain
#pragma omp parallel for
for (size_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(req_data.keys[i]);
int64_t row_id = key - master_key;
const auto src = data + row_id * unit_len;
auto begin = (i - 1) * unit_len;
auto end = i * unit_len;
response.vals.segment(begin, end).CopyFrom(src, unit_len);
}
// setup response
response.keys = req_data.keys;
std::vector<int> lens(req_data.keys.size(), unit_len);
lens[0] = 0;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
}
}
// Answer a dense pull with the full stored value for key.
void DefaultStorageResponse(int key, const NDArray& stored,
const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
ps::KVPairs<real_t> response;
CHECK(!stored.is_none()) << "init " << key << " first";
auto len = stored.shape().Size();
response.keys = req_data.keys;
response.lens = {len};
// TODO(mli) try to remove this CopyFrom
response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len);
server->Response(req_meta, response);
}
// Push/pull of gradient-compressed values. A compressed push carries two
// keys: keys[0] encodes the original (uncompressed) size, keys[1] the
// actual parameter key.
void DataHandleCompressed(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
if (req_meta.push) {
// there used several WaitToRead, this is because \a recved's memory
// could be deallocated when this function returns. so we need to make sure
// the operators with \a NDArray are actually finished
// first for dummy key which represents original size of array, whose len is 0
CHECK_EQ(req_data.keys.size(), (size_t)2);
CHECK_EQ(req_data.lens.size(), (size_t)2);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
int original_size = DecodeKey(req_data.keys[0]);
int key = DecodeKey(req_data.keys[1]);
auto& stored = store_[key];
size_t ds[] = {(size_t)req_data.lens[1]};
TShape dshape(ds, ds + 1);
TBlob recv_blob((real_t*) req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
NDArray decomp_buf = decomp_buf_[key];
dshape = TShape{(int64_t) original_size};
if (decomp_buf.is_none()) {
decomp_buf = NDArray(dshape, Context());
}
if (stored.is_none()) {
stored = NDArray(dshape, Context());
gradient_compression_->Dequantize(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
auto& merged = merge_buf_[key];
if (merged.array.is_none()) {
merged.array = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
gradient_compression_->Dequantize(recved, &merged.array, 0);
} else {
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
merged.array += decomp_buf;
}
merged.request.push_back(req_meta);
ApplyUpdates(key, &merged, &stored, server);
} else {
// async push
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
exec_.Exec([this, key, &decomp_buf, &stored]() {
CHECK(updater_);
updater_(key, decomp_buf, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else { // pull
CHECK_EQ(req_data.keys.size(), (size_t)1);
CHECK_EQ(req_data.lens.size(), (size_t)0);
int key = DecodeKey(req_data.keys[0]);
DefaultStorageResponse(key, store_[key], req_meta, req_data, server);
}
}
// Push/pull of plain dense values (single key per request).
void DataHandleDefault(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
CHECK_EQ(req_meta.cmd, static_cast<int>(DataHandleType::kDefaultPushPull));
// do some check
CHECK_EQ(req_data.keys.size(), (size_t)1);
if (req_meta.push) {
CHECK_EQ(req_data.lens.size(), (size_t)1);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
}
ps::Key key = req_data.keys[0];
auto& stored = store_[key];
// there used several WaitToRead, this is because \a recved's memory
// could be deallocated when this function returns. so we need to make sure
// the operators with \a NDArray are actually finished
if (req_meta.push) {
size_t ds[] = {(size_t)req_data.lens[0]};
TShape dshape(ds, ds + 1);
TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
if (stored.is_none()) {
// initialization
stored = NDArray(dshape, Context());
CopyFromTo(recved, &stored, 0);
stored.WaitToRead();
ps::KVPairs<real_t> res;
size_t size = stored.shape().Size();
real_t* data = stored.data().dptr<real_t>();
ps::SArray<real_t> vals(data, size, false);
res.keys.push_back(key);
res.vals = vals;
res.lens.push_back(size);
server->Response(req_meta, res);
} else if (sync_mode_) {
// synced push
auto& merged = merge_buf_[key];
if (merged.array.is_none()) {
merged.array = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
CopyFromTo(recved, &merged.array, 0);
} else {
merged.array += recved;
}
merged.request.push_back(req_meta);
ApplyUpdates(key, &merged, &stored, server);
} else {
// async push
exec_.Exec([this, key, &recved, &stored](){
CHECK(updater_);
updater_(key, recved, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else {
DefaultStorageResponse(key, stored, req_meta, req_data, server);
}
}
// Translate a global ps key into this server's local key range.
int DecodeKey(ps::Key key) {
auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
return key - kr.begin();
}
/**
* \brief user defined mode for push
*/
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
* \brief store_ contains the value at kvstore for each key
*/
std::unordered_map<ps::Key, NDArray> store_;
/**
* \brief merge_buf_ is a buffer used if sync_mode is true. It represents
* values from different workers being merged. The store will be updated
* to this value when values from all workers are pushed into this buffer.
*/
std::unordered_map<ps::Key, MergeBuf> merge_buf_;
/**
* \brief decomp_buf_ is a buffer into which compressed values are
* decompressed before merging to the store. used when compress_!='none'
*/
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<float>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/**
* \brief gradient compression object.
* starts with none, used after SetGradientCompression sets the type
* currently there is no support for unsetting gradient compression
*/
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
inference_helper.h | /* Copyright 2021 iwatake2222
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef INFERENCE_HELPER_
#define INFERENCE_HELPER_
/* for general */
#include <cstdint>
#include <cmath>
#include <string>
#include <vector>
#include <array>
#include <memory>
// Describes one model tensor: identification, element type, dimensions,
// and memory layout (NCHW vs NHWC).
class TensorInfo {
public:
enum {
kTensorTypeNone,
kTensorTypeUint8,
kTensorTypeInt8,
kTensorTypeFp32,
kTensorTypeInt32,
kTensorTypeInt64,
};
public:
TensorInfo()
: name("")
, id(-1)
, tensor_type(kTensorTypeNone)
, is_nchw(true)
{}
~TensorInfo() {}
// Total number of elements: the product of all dimensions (1 if empty).
int32_t GetElementNum() const
{
int32_t count = 1;
for (int32_t d : tensor_dims) {
count *= d;
}
return count;
}
// Each accessor maps the logical axis to its position in tensor_dims
// according to the layout flag, returning -1 when the axis is absent.
int32_t GetBatch() const
{
return DimAt(0);
}
int32_t GetChannel() const
{
return DimAt(is_nchw ? 1 : 3);
}
int32_t GetHeight() const
{
return DimAt(is_nchw ? 2 : 1);
}
int32_t GetWidth() const
{
return DimAt(is_nchw ? 3 : 2);
}
public:
std::string name; // [In] Set the name_ of tensor
int32_t id; // [Out] Do not modify (Used in InferenceHelper)
int32_t tensor_type; // [In] The type of tensor (e.g. kTensorTypeFp32)
std::vector<int32_t> tensor_dims; // InputTensorInfo: [In] The dimentions of tensor. (If empty at initialize, the size is updated from model info.)
// OutputTensorInfo: [Out] The dimentions of tensor is set from model information
bool is_nchw; // [IN] NCHW or NHWC
private:
// tensor_dims[index], or -1 if the dimension list is too short.
int32_t DimAt(size_t index) const
{
if (tensor_dims.size() <= index) return -1;
return tensor_dims[index];
}
};
// Input tensor descriptor: adds the source data pointer, its kind
// (raw image vs preprocessed blob), crop/color information, and
// normalization parameters to TensorInfo.
class InputTensorInfo : public TensorInfo {
public:
enum {
kDataTypeImage,
kDataTypeBlobNhwc, // data_ which already finished preprocess(color conversion, resize, normalize_, etc.)
kDataTypeBlobNchw,
};
public:
// Default: no data attached; image_info/normalize fields are positional
// aggregate initializers matching the struct declarations below.
InputTensorInfo()
: data(nullptr)
, data_type(kDataTypeImage)
, image_info({ -1, -1, -1, -1, -1, -1, -1, true, false })
, normalize({ 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f })
{}
InputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
: InputTensorInfo()
{
name = name_;
tensor_type = tensor_type_;
is_nchw = is_nchw_;
}
~InputTensorInfo() {}
public:
void* data; // [In] Set the pointer to image/blob
int32_t data_type; // [In] Set the type of data_ (e.g. kDataTypeImage)
struct {
int32_t width;
int32_t height;
int32_t channel;
int32_t crop_x;
int32_t crop_y;
int32_t crop_width;
int32_t crop_height;
bool is_bgr; // used when channel == 3 (true: BGR, false: RGB)
bool swap_color;
} image_info; // [In] used when data_type_ == kDataTypeImage
struct {
float mean[3];
float norm[3];
} normalize; // [In] used when data_type_ == kDataTypeImage
};
class OutputTensorInfo : public TensorInfo {
public:
OutputTensorInfo()
: data(nullptr)
, quant({ 1.0f, 0 })
, data_fp32_(nullptr)
{}
OutputTensorInfo(std::string name_, int32_t tensor_type_, bool is_nchw_ = true)
: OutputTensorInfo()
{
name = name_;
tensor_type = tensor_type_;
is_nchw = is_nchw;
}
~OutputTensorInfo() {
if (data_fp32_ != nullptr) {
delete[] data_fp32_;
}
}
float* GetDataAsFloat() { /* Returned pointer should be with const, but returning pointer without const is convenient to create cv::Mat */
if (tensor_type == kTensorTypeUint8 || tensor_type == kTensorTypeInt8) {
if (data_fp32_ == nullptr) {
data_fp32_ = new float[GetElementNum()];
}
if (tensor_type == kTensorTypeUint8) {
#pragma omp parallel
for (int32_t i = 0; i < GetElementNum(); i++) {
const uint8_t* val_uint8 = static_cast<const uint8_t*>(data);
float val_float = (val_uint8[i] - quant.zero_point) * quant.scale;
data_fp32_[i] = val_float;
}
} else {
#pragma omp parallel
for (int32_t i = 0; i < GetElementNum(); i++) {
const int8_t* val_int8 = static_cast<const int8_t*>(data);
float val_float = (val_int8[i] - quant.zero_point) * quant.scale;
data_fp32_[i] = val_float;
}
}
return data_fp32_;
} else if (tensor_type == kTensorTypeFp32) {
return static_cast<float*>(data);
} else {
return nullptr;
}
}
public:
void* data; // [Out] Pointer to the output data_
struct {
float scale;
int32_t zero_point;
} quant; // [Out] Parameters for dequantization (convert uint8 to float)
private:
float* data_fp32_;
};
namespace cv {
class Mat;
};
// Abstract interface wrapping a specific inference engine (OpenCV dnn,
// TFLite, TensorRT, NCNN, MNN, SNPE, Armnn, Nnabla, ...). Concrete
// backends implement Initialize/PreProcess/Process/Finalize.
class InferenceHelper {
public:
enum {
kRetOk = 0,
kRetErr = -1,
};
// Backend selector passed to Create().
typedef enum {
kOpencv,
kOpencvGpu,
kTensorflowLite,
kTensorflowLiteXnnpack,
kTensorflowLiteGpu,
kTensorflowLiteEdgetpu,
kTensorflowLiteNnapi,
kTensorrt,
kNcnn,
kMnn,
kSnpe,
kArmnn,
kNnabla,
kNnablaCuda,
} HelperType;
public:
// Factory: returns a heap-allocated backend instance (caller owns it).
static InferenceHelper* Create(const HelperType helper_type);
static void PreProcessByOpenCV(const InputTensorInfo& input_tensor_info, bool is_nchw, cv::Mat& img_blob); // use this if the selected inference engine doesn't support pre-process
public:
virtual ~InferenceHelper() {}
virtual int32_t SetNumThreads(const int32_t num_threads) = 0;
virtual int32_t SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) = 0;
virtual int32_t Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
virtual int32_t Finalize(void) = 0;
virtual int32_t PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) = 0;
virtual int32_t Process(std::vector<OutputTensorInfo>& output_tensor_info_list) = 0;
protected:
// Shared helpers for backends: normalization parameter conversion and
// image/blob preprocessing into the destination buffer type.
void ConvertNormalizeParameters(InputTensorInfo& tensor_info);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, float* dst);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, uint8_t* dst);
void PreProcessImage(int32_t num_thread, const InputTensorInfo& input_tensor_info, int8_t* dst);
template<typename T>
void PreProcessBlob(int32_t num_thread, const InputTensorInfo& input_tensor_info, T *dst);
protected:
HelperType helper_type_; // backend chosen at Create() time
};
#endif
|
convect_particles_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pablo Becker
//
//
#if !defined(KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED )
#define KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED
#define PRESSURE_ON_EULERIAN_MESH
#define USE_FEW_PARTICLES
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "includes/variables.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "processes/node_erase_process.h"
#include "utilities/binbased_fast_point_locator.h"
#include <boost/timer.hpp>
#include "utilities/timer.h"
#ifdef _OPENMP
#include "omp.h"
#endif
namespace Kratos
{
template<std::size_t TDim> class ParticleConvectUtily
{
public:
KRATOS_CLASS_POINTER_DEFINITION(ParticleConvectUtily<TDim>);
// Keeps a reference to the bin-based point locator used to find which
// mesh element contains a given particle position.
ParticleConvectUtily(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure)
: mpSearchStructure(pSearchStructure)
{
}
~ParticleConvectUtily()
{
}
//**********************************************************************************************
//**********************************************************************************************
///this function moves all the nodes contained in rModelPart from their position at time tn to the one at time
///tn+1 by following the trajectories. This is done by performing "subdivions" forward euler steps within each time step
///@param rModelPart the model part on which we work
///@param subdivisions number of forward euler substeps used in advancing in time
void MoveParticles_Substepping(ModelPart& rModelPart, unsigned int subdivisions)
{
KRATOS_TRY
const double dt = rModelPart.GetProcessInfo()[DELTA_TIME];
// each forward-Euler substep advances by dt/subdivisions
const double small_dt = dt/ static_cast<double>(subdivisions);
//do movement
array_1d<double, 3 > veulerian;
array_1d<double, 3 > acc_particle;
Vector N(TDim + 1);
const int max_results = rModelPart.Nodes().size();
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rModelPart.Nodes().size();
// results/N/veulerian/acc_particle are scratch buffers — one private copy per thread
#pragma omp parallel for firstprivate(results,N,veulerian,acc_particle)
for (int i = 0; i < nparticles; i++)
{
unsigned int substep = 0;
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
// start from the position at the previous time step (initial + old displacement)
array_1d<double,3> current_position = iparticle->GetInitialPosition() + iparticle->FastGetSolutionStepValue(DISPLACEMENT,1);
Element::Pointer pelement;
bool is_found = false;
array_1d<double, 3> aux_point_local_coordinates;
while(substep++ < subdivisions)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
is_found = false;
if(substep > 1 ) //first check if it falls within the same element
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
is_found = geom.IsInside(current_position, aux_point_local_coordinates, 1.0e-5);
geom.ShapeFunctionsValues(N, aux_point_local_coordinates);
// fall back to the global search if the particle left the element
if(is_found == false)
is_found = mpSearchStructure->FindPointOnMesh(current_position, N, pelement, result_begin, max_results);
}
else //if not found use the search structure
{
is_found = mpSearchStructure->FindPointOnMesh(current_position, N, pelement, result_begin, max_results);
}
// mark for erasure by default; cleared below if the particle stays in the mesh
(iparticle)->Set(TO_ERASE, true);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
// interpolate the velocity in time between step n and n+1 ...
const double new_step_factor = static_cast<double>(substep)/subdivisions;
const double old_step_factor = 1.0 - new_step_factor;
// ... and in space with the element shape functions
noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
noalias(current_position) += small_dt*veulerian;
(iparticle)->Set(TO_ERASE, false);
}
else
// particle left the domain: stop substepping, leave TO_ERASE set
break;
}
// update the node only if the full trajectory stayed inside the mesh
if (is_found == true)
{
iparticle->FastGetSolutionStepValue(DISPLACEMENT) = current_position - iparticle->GetInitialPosition();
noalias(pparticle->Coordinates()) = current_position;
}
}
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
///this function moves the mesh as xn+1 = xn + vn*dt and sets the mesh velocity to vn
///@param rModelPart the model part on which we work
void MoveParticles_RK4(ModelPart& rModelPart)
{
KRATOS_TRY
const double dt = rModelPart.GetProcessInfo()[DELTA_TIME];
//do movement: x(n+1) = x(n) + dt/6*(v1 + 2*v2 + 2*v3 + v4)
array_1d<double, 3 > v1,v2,v3,v4,x;
Vector N(TDim + 1);
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N,v1,v2,v3,v4,x)
for (int i = 0; i < nparticles; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
//position at the beginning of the step = initial position + old displacement
array_1d<double,3> initial_position = iparticle->GetInitialPosition() + iparticle->FastGetSolutionStepValue(DISPLACEMENT,1);
Element::Pointer pelement;
bool is_found = false;
//STEP1: v1 = v(x(n), t(n)) -- old-step velocity at the starting position
{
is_found = mpSearchStructure->FindPointOnMesh(initial_position, N, pelement, result_begin, max_results);
if( is_found == false) goto end_of_particle;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
noalias(v1) = N[0] * ( geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(v1) += N[k] * ( geom[k].FastGetSolutionStepValue(VELOCITY,1) );
}
//STEP2: v2 = v(x(n) + dt/2*v1, t(n)+dt/2) -- velocity interpolated half way in time
{
noalias(x) = initial_position + (0.5*dt)*v1;
is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results);
if( is_found == false) goto end_of_particle;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
const double new_step_factor = 0.5;
const double old_step_factor = 0.5;
noalias(v2) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(v2) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
}
//STEP3: v3 = v(x(n) + dt/2*v2, t(n)+dt/2)
{
const array_1d<double,3> x = initial_position + (0.5*dt)*v2;
is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results);
if( is_found == false) goto end_of_particle;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
const double new_step_factor = 0.5; //as the step before
const double old_step_factor = 0.5;
noalias(v3) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(v3) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
}
//STEP4: v4 = v(x(n) + dt*v3, t(n+1)) -- new-step velocity at the end position
{
const array_1d<double,3> x = initial_position + (dt)*v3;
is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results);
if( is_found == false) goto end_of_particle;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
noalias(v4) = N[0] * ( geom[0].FastGetSolutionStepValue(VELOCITY));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(v4) += N[k] * ( geom[k].FastGetSolutionStepValue(VELOCITY) );
}
(iparticle)->Set(TO_ERASE, false);
//finalize step: classical RK4 weights 1/6, 2/6, 2/6, 1/6
noalias(x) = initial_position;
noalias(x) += 0.16666666666666666666667*dt*v1;
noalias(x) += 0.33333333333333333333333*dt*v2;
noalias(x) += 0.33333333333333333333333*dt*v3;
noalias(x) += 0.16666666666666666666667*dt*v4;
iparticle->FastGetSolutionStepValue(DISPLACEMENT) = x - iparticle->GetInitialPosition();
noalias(pparticle->Coordinates()) = x;
//skip the failure label: without this "continue" the fall-through would
//mark every particle (including successfully convected ones) for erasure
continue;
end_of_particle: (iparticle)->Set(TO_ERASE, true);
}
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
///this function erases the elements and conditions which have at least one node marked for erase
///@param rModelPart the model part on which we work
void EraseOuterElements(ModelPart& rModelPart)
{
KRATOS_TRY
//first pass: flag every element that touches at least one node marked for erasure
int erased_counter = 0;
for(ModelPart::ElementsContainerType::iterator iel = rModelPart.ElementsBegin(); iel!=rModelPart.ElementsEnd(); iel++)
{
Geometry< Node<3> >& rGeom = iel->GetGeometry();
bool shall_erase = false;
for(unsigned int n=0; n<rGeom.size() && shall_erase==false; n++)
shall_erase = rGeom[n].Is(TO_ERASE);
if(shall_erase)
{
iel->Set(TO_ERASE,true);
erased_counter++;
}
}
//second pass: rebuild the element container keeping only the survivors
if(erased_counter > 0)
{
ModelPart::ElementsContainerType survivors;
survivors.reserve(rModelPart.Elements().size() - erased_counter);
survivors.swap(rModelPart.Elements());
for(ModelPart::ElementsContainerType::iterator iel = survivors.begin() ; iel != survivors.end() ; iel++)
{
if( iel->IsNot(TO_ERASE) )
(rModelPart.Elements()).push_back(*(iel.base()));
}
}
KRATOS_CATCH("")
}
private:
typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure;
};
} // namespace Kratos.
#endif // KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED defined
|
bli_dotv_bgq_int.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
// Dot product rho := x^T y for double-precision vectors on BlueGene/Q,
// using QPX 4-wide fused multiply-add with an OpenMP reduction.
// conjx/conjy are accepted for interface uniformity (real arithmetic).
// Strided or unaligned inputs are deferred to the reference kernel.
void bli_ddotv_bgq_int
(
conj_t conjx,
conj_t conjy,
dim_t n,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
double* restrict rho,
cntx_t* restrict cntx
)
{
// An empty vector contributes nothing: rho = 0.
if ( bli_zero_dim1( n ) )
{
PASTEMAC(d,set0s)( *rho );
return;
}
// Aligned QPX loads require unit stride and 32-byte alignment;
// anything else goes through the reference implementation.
bool_t use_ref = FALSE;
if ( incx != 1 || incy != 1 ) use_ref = TRUE;
if ( bli_is_unaligned_to( ( siz_t )x, 32 ) ) use_ref = TRUE;
if ( bli_is_unaligned_to( ( siz_t )y, 32 ) ) use_ref = TRUE;
if ( use_ref )
{
BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
return;
}
dim_t n_run = n / 4; // full 4-element vector iterations
dim_t n_left = n % 4; // scalar remainder
double acc = 0.0;
#pragma omp parallel reduction(+:acc)
{
dim_t t_id = omp_get_thread_num();
dim_t n_threads = omp_get_num_threads();
// Each thread folds a round-robin slice of the vector iterations
// into a private 4-wide partial sum.
vector4double sumv = vec_splats( 0.0 );
for ( dim_t i = t_id; i < n_run; i += n_threads )
{
vector4double xv = vec_lda( 0 * sizeof(double), &x[i*4] );
vector4double yv = vec_lda( 0 * sizeof(double), &y[i*4] );
sumv = vec_madd( xv, yv, sumv );
}
// Horizontal reduction of the four vector lanes.
acc += vec_extract( sumv, 0 );
acc += vec_extract( sumv, 1 );
acc += vec_extract( sumv, 2 );
acc += vec_extract( sumv, 3 );
}
// Scalar tail for the final n % 4 elements.
for ( dim_t i = 0; i < n_left; i++ )
{
acc += x[4*n_run + i] * y[4*n_run + i];
}
*rho = acc;
}
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% John Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const ChannelType channel,const DrawInfo *draw_info,
% const MagickPixelPacket target,const ssize_t x_offset,
% const ssize_t y_offset,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
const ChannelType channel,const DrawInfo *draw_info,
const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
const MagickBooleanType invert)
{
/*
Capacity of the scan-line segment stack (32768 entries).
*/
#define MaxStacksize (1UL << 15)
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
CacheView
*floodplane_view,
*image_view;
ExceptionInfo
*exception;
Image
*floodplane_image;
MagickBooleanType
skip;
MagickPixelPacket
fill,
pixel;
PixelPacket
fill_color;
register SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if ((target->opacity != OpaqueOpacity) && (image->matte == MagickFalse))
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Set floodfill state.  The floodplane clone records which pixels the fill
has visited: a visited pixel's opacity is set to TransparentOpacity.
*/
floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
(void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
sizeof(*segment_stack));
if (segment_stack == (SegmentInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Push initial segment on stack (seed row, plus the row below it).
*/
exception=(&image->exception);
x=x_offset;
y=y_offset;
start=0;
s=segment_stack;
PushSegmentStack(y,x,x,1);
PushSegmentStack(y+1,x,x,-1);
GetMagickPixelPacket(image,&fill);
GetMagickPixelPacket(image,&pixel);
image_view=AcquireCacheView(image);
floodplane_view=AcquireCacheView(floodplane_image);
/*
Scan-line fill: repeatedly pop a horizontal segment and expand it.
*/
while (s > segment_stack)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Pop segment off stack.  y2 holds the row delta; y1+delta is the row
to process.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels: walk left from x1 while pixels match the
target (comparison sense controlled by invert) and are unvisited.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
p+=x1;
q+=x1;
for (x=x1; x >= 0; x--)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) == invert)
break;
q->opacity=(Quantum) TransparentOpacity;
p--;
q--;
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
/*
skip is set when no pixel at or left of x1 was fillable.
*/
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
/*
Walk right from x, marking fillable pixels as visited.
*/
if (x < (ssize_t) image->columns)
{
p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
image->columns-x,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for ( ; x < (ssize_t) image->columns; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) == invert)
break;
q->opacity=(Quantum) TransparentOpacity;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
}
/*
Queue the adjacent rows of the span just filled.
*/
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
/*
Advance past any non-fillable gap inside the parent segment.
*/
if (x <= x2)
{
p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for ( ; x <= x2; x++)
{
if (q->opacity == (Quantum) TransparentOpacity)
break;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if (IsMagickColorSimilar(&pixel,target) != invert)
break;
p++;
q++;
}
}
start=x;
} while (x <= x2);
}
/*
Tile fill color onto floodplane: every pixel marked visited above gets
the draw_info fill color, restricted to the requested channels.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
/*
Tile fill color onto floodplane.
*/
p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
{
(void) GetFillColor(draw_info,x,y,&fill_color);
SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
if (image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&fill);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(fill.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(fill.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(fill.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(fill.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
break;
}
/*
Release views, the segment stack, and the floodplane clone.
*/
floodplane_view=DestroyCacheView(floodplane_view);
image_view=DestroyCacheView(image_view);
segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
floodplane_image=DestroyImage(floodplane_image);
return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transition.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelPacket *start_color,
% const PixelPacket *stop_color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread method: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% This provides a good example of making use of the DrawGradientImage
% function and the gradient structure in draw_info.
*/
/*
Return the larger of two doubles; on ties (and when x is NaN) y is
returned, matching the x > y ? x : y form.
*/
static inline double MagickMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
MagickExport MagickBooleanType GradientImage(Image *image,
const GradientType type,const SpreadMethod method,
const PixelPacket *start_color,const PixelPacket *stop_color)
{
DrawInfo
*draw_info;
GradientInfo
*gradient;
MagickBooleanType
status;
register ssize_t
i;
/*
Set gradient start-stop end points.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(start_color != (const PixelPacket *) NULL);
assert(stop_color != (const PixelPacket *) NULL);
draw_info=AcquireDrawInfo();
gradient=(&draw_info->gradient);
gradient->type=type;
gradient->bounding_box.width=image->columns;
gradient->bounding_box.height=image->rows;
gradient->gradient_vector.x2=(double) image->columns-1.0;
gradient->gradient_vector.y2=(double) image->rows-1.0;
/*
For a linear gradient on a multi-row image, make the vector vertical.
*/
if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
gradient->gradient_vector.x2=0.0;
gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
gradient->radius=MagickMax(gradient->center.x,gradient->center.y);
gradient->spread=method;
/*
Define the gradient to fill between the stops.
*/
gradient->number_stops=2;
gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
sizeof(*gradient->stops));
if (gradient->stops == (StopInfo *) NULL)
{
/*
Destroy draw_info before throwing: the original code leaked it on
this allocation-failure path.
*/
draw_info=DestroyDrawInfo(draw_info);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
sizeof(*gradient->stops));
for (i=0; i < (ssize_t) gradient->number_stops; i++)
GetMagickPixelPacket(image,&gradient->stops[i].color);
SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
&gradient->stops[0].color);
gradient->stops[0].offset=0.0;
SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
&gradient->stops[1].color);
gradient->stops[1].offset=1.0;
/*
Draw a gradient on the image.
*/
status=DrawGradientImage(image,draw_info);
draw_info=DestroyDrawInfo(draw_info);
/*
Fully opaque endpoints imply no alpha; two gray endpoints imply a
grayscale result.
*/
if ((start_color->opacity == OpaqueOpacity) &&
(stop_color->opacity == OpaqueOpacity))
image->matte=MagickFalse;
if ((IsGrayPixel(start_color) != MagickFalse) &&
(IsGrayPixel(stop_color) != MagickFalse))
image->type=GrayscaleType;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Release the per-thread histogram set returned by
AcquireHistogramThreadSet(); returns NULL for convenient assignment.
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
register ssize_t
tid;
assert(histogram != (size_t **) NULL);
for (tid=0; tid < (ssize_t) GetOpenMPMaximumThreads(); tid++)
{
/* entries may be NULL when acquisition failed part way */
if (histogram[tid] != (size_t *) NULL)
histogram[tid]=(size_t *) RelinquishMagickMemory(histogram[tid]);
}
histogram=(size_t **) RelinquishMagickMemory(histogram);
return(histogram);
}
/*
Allocate one histogram of `count` bins per OpenMP thread.  Returns NULL
on allocation failure (partial allocations are released first).
*/
static size_t **AcquireHistogramThreadSet(const size_t count)
{
register ssize_t
tid;
size_t
**histogram,
number_threads;
number_threads=GetOpenMPMaximumThreads();
histogram=(size_t **) AcquireQuantumMemory(number_threads,
sizeof(*histogram));
if (histogram == (size_t **) NULL)
return((size_t **) NULL);
/* zero the pointer table so a partial failure can be cleaned up */
(void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
for (tid=0; tid < (ssize_t) number_threads; tid++)
{
histogram[tid]=(size_t *) AcquireQuantumMemory(count,
sizeof(**histogram));
if (histogram[tid] == (size_t *) NULL)
return(DestroyHistogramThreadSet(histogram));
}
return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"
CacheView
*image_view,
*paint_view;
Image
*paint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
**restrict histograms,
width;
ssize_t
y;
/*
Initialize painted image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=GetOptimalKernelWidth2D(radius,0.5);
paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (paint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse)
{
InheritException(exception,&paint_image->exception);
paint_image=DestroyImage(paint_image);
return((Image *) NULL);
}
/*
One intensity histogram (256 bins) per OpenMP thread.
*/
histograms=AcquireHistogramThreadSet(NumberPaintBins);
if (histograms == (size_t **) NULL)
{
paint_image=DestroyImage(paint_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Oil paint image: each output pixel becomes the neighborhood pixel whose
intensity bin is most frequent in the width x width window.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
paint_view=AcquireCacheView(paint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict paint_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
register size_t
*histogram;
if (status == MagickFalse)
continue;
/*
Fetch the source row plus a width/2 apron on every side; virtual
pixels supply the out-of-bounds values.
*/
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(width/2L),image->columns+width,width,exception);
q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
histogram=histograms[GetOpenMPThreadId()];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i,
u;
size_t
count;
ssize_t
j,
k,
v;
/*
Assign most frequent color.  j tracks the offset (relative to p)
of the pixel belonging to the winning intensity bin.
*/
i=0;
j=0;
count=0;
(void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram));
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
k=(ssize_t) ScaleQuantumToChar(PixelIntensityToQuantum(p+u+i));
histogram[k]++;
if (histogram[k] > count)
{
j=i+u;
count=histogram[k];
}
}
/* advance i to the next row of the padded neighborhood */
i+=(ssize_t) (image->columns+width);
}
*q=(*(p+j));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(paint_indexes+x,GetPixelIndex(
indexes+x+j));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_OilPaintImage)
#endif
proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
paint_view=DestroyCacheView(paint_view);
image_view=DestroyCacheView(image_view);
histograms=DestroyHistogramThreadSet(histograms);
if (status == MagickFalse)
paint_image=DestroyImage(paint_image);
return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,
% const MagickPixelPacket *target,const MagickPixelPacket *fill,
% const MagickBooleanType invert)
% MagickBooleanType OpaquePaintImageChannel(Image *image,
% const ChannelType channel,const MagickPixelPacket *target,
% const MagickPixelPacket *fill,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
*/
/*
Convenience wrapper: applies OpaquePaintImageChannel() to the default
composite channels.
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
const MagickPixelPacket *target,const MagickPixelPacket *fill,
const MagickBooleanType invert)
{
return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}
MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
const ChannelType channel,const MagickPixelPacket *target,
const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag "Opaque/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(target != (MagickPixelPacket *) NULL);
assert(fill != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
A non-opaque fill needs an alpha channel on a matte-less image.
*/
if ((fill->opacity != OpaqueOpacity) && (image->matte == MagickFalse))
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Make image color opaque: replace every pixel matching (or, with invert,
not matching) the target with the fill color on the selected channels.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
GetMagickPixelPacket(image,&zero);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
/* fuzz-aware comparison; invert flips the match sense */
if (IsMagickColorSimilar(&pixel,target) != invert)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(fill->red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(fill->green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(fill->blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(fill->opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ClampToQuantum(fill->index));
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
const MagickPixelPacket *target,const Quantum opacity,
const MagickBooleanType invert)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(target != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Writing opacity values requires an alpha channel.
*/
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/*
Make image color transparent: set the opacity of every pixel matching
(or, with invert, not matching) the target to the given value.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
GetMagickPixelPacket(image,&zero);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
/* fuzz-aware comparison; invert flips the match sense */
if (IsMagickColorSimilar(&pixel,target) != invert)
q->opacity=opacity;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransparentPaintImage)
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for the all the channels, the
% TransparentPaintImage() API is not suitable for the operations like chroma,
% where the tolerance for similarity of two color component (RGB) can be
% different, Thus we define this method take two target pixels (one
% low and one hight) and all the pixels of an image which are lying between
% these two pixels are made transparent.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const MagickPixelPacket *low,const MagickPixelPacket *hight,
% const Quantum opacity,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const MagickPixelPacket *low,const MagickPixelPacket *high,
const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(high != (MagickPixelPacket *) NULL);
assert(low != (MagickPixelPacket *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,ResetAlphaChannel);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
match;
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
GetMagickPixelPacket(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ?
MagickTrue : MagickFalse;
if (match != invert)
q->opacity=opacity;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
cv_basic.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
<<<<<<< HEAD
#include "lite/utils/cv/cv_enum.h"
typedef paddle::lite::utils::cv::ImageFormat ImageFormat;
typedef paddle::lite::utils::cv::FlipParam FlipParam;
typedef paddle::lite::utils::cv::LayOut LayOut;
typedef paddle::lite::Tensor Tensor;
=======
#include "lite/utils/cv/paddle_image_preprocess.h"
typedef paddle::lite::utils::cv::ImageFormat ImageFormat;
typedef paddle::lite::utils::cv::FlipParam FlipParam;
typedef paddle::lite::Tensor Tensor;
typedef paddle::lite_api::DataLayoutType LayoutType;
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
void nv2bgr(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch,
int v_num,
int u_num) {
int size = srch * srcw;
const uint8_t* y_ptr = in_data;
const uint8_t* uv_ptr = in_data + size;
for (int i = 0; i < srch; i++) {
int j = 0;
const uint8_t* ptr_y1 = y_ptr + i * srcw;
const uint8_t* ptr_vu = uv_ptr + (i / 2) * srcw;
uint8_t* ptr_bgr1 = out_data + i * 3 * srcw;
for (; j < srcw; j += 2) {
uint8_t _y0 = ptr_y1[0];
uint8_t _y1 = ptr_y1[1];
uint8_t _v = ptr_vu[v_num];
uint8_t _u = ptr_vu[u_num];
int ra = floor((179 * (_v - 128)) >> 7);
int ga = floor((44 * (_u - 128) + 91 * (_v - 128)) >> 7);
int ba = floor((227 * (_u - 128)) >> 7);
int r = _y0 + ra;
int g = _y0 - ga;
int b = _y0 + ba;
int r1 = _y1 + ra;
int g1 = _y1 - ga;
int b1 = _y1 + ba;
r = r < 0 ? 0 : (r > 255) ? 255 : r;
g = g < 0 ? 0 : (g > 255) ? 255 : g;
b = b < 0 ? 0 : (b > 255) ? 255 : b;
r1 = r1 < 0 ? 0 : (r1 > 255) ? 255 : r1;
g1 = g1 < 0 ? 0 : (g1 > 255) ? 255 : g1;
b1 = b1 < 0 ? 0 : (b1 > 255) ? 255 : b1;
*ptr_bgr1++ = b;
*ptr_bgr1++ = g;
*ptr_bgr1++ = r;
*ptr_bgr1++ = b1;
*ptr_bgr1++ = g1;
*ptr_bgr1++ = r1;
ptr_y1 += 2;
ptr_vu += 2;
}
if (j < srcw) {
uint8_t _y = ptr_y1[0];
uint8_t _v = ptr_vu[v_num];
uint8_t _u = ptr_vu[u_num];
int r = _y + ((179 * (_v - 128)) >> 7);
int g = _y - ((44 * (_u - 128) - 91 * (_v - 128)) >> 7);
int b = _y + ((227 * (_u - 128)) >> 7);
r = r < 0 ? 0 : (r > 255) ? 255 : r;
g = g < 0 ? 0 : (g > 255) ? 255 : g;
b = b < 0 ? 0 : (b > 255) ? 255 : b;
ptr_bgr1[0] = b;
ptr_bgr1[1] = g;
ptr_bgr1[2] = r;
}
}
}
void nv2bgra(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch,
int v_num,
int u_num) {
int size = srch * srcw;
const uint8_t* y_ptr = in_data;
const uint8_t* uv_ptr = in_data + size;
for (int i = 0; i < srch; i++) {
int j = 0;
const uint8_t* ptr_y1 = y_ptr + i * srcw;
const uint8_t* ptr_vu = uv_ptr + (i / 2) * srcw;
uint8_t* ptr_bgr1 = out_data + i * 4 * srcw;
for (; j < srcw; j += 2) {
uint8_t _y0 = ptr_y1[0];
uint8_t _y1 = ptr_y1[1];
uint8_t _v = ptr_vu[v_num];
uint8_t _u = ptr_vu[u_num];
int ra = floor((179 * (_v - 128)) >> 7);
int ga = floor((44 * (_u - 128) + 91 * (_v - 128)) >> 7);
int ba = floor((227 * (_u - 128)) >> 7);
int r = _y0 + ra;
int g = _y0 - ga;
int b = _y0 + ba;
int r1 = _y1 + ra;
int g1 = _y1 - ga;
int b1 = _y1 + ba;
r = r < 0 ? 0 : (r > 255) ? 255 : r;
g = g < 0 ? 0 : (g > 255) ? 255 : g;
b = b < 0 ? 0 : (b > 255) ? 255 : b;
r1 = r1 < 0 ? 0 : (r1 > 255) ? 255 : r1;
g1 = g1 < 0 ? 0 : (g1 > 255) ? 255 : g1;
b1 = b1 < 0 ? 0 : (b1 > 255) ? 255 : b1;
*ptr_bgr1++ = b;
*ptr_bgr1++ = g;
*ptr_bgr1++ = r;
*ptr_bgr1++ = 255;
*ptr_bgr1++ = b1;
*ptr_bgr1++ = g1;
*ptr_bgr1++ = r1;
*ptr_bgr1++ = 255;
ptr_y1 += 2;
ptr_vu += 2;
}
if (j < srcw) {
uint8_t _y = ptr_y1[0];
uint8_t _v = ptr_vu[v_num];
uint8_t _u = ptr_vu[u_num];
int r = _y + ((179 * (_v - 128)) >> 7);
int g = _y - ((44 * (_u - 128) - 91 * (_v - 128)) >> 7);
int b = _y + ((227 * (_u - 128)) >> 7);
r = r < 0 ? 0 : (r > 255) ? 255 : r;
g = g < 0 ? 0 : (g > 255) ? 255 : g;
b = b < 0 ? 0 : (b > 255) ? 255 : b;
ptr_bgr1[0] = b;
ptr_bgr1[1] = g;
ptr_bgr1[2] = r;
ptr_bgr1[3] = 255;
}
}
}
void nv12_bgr_basic(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch) {
nv2bgr(in_data, out_data, srcw, srch, 1, 0);
}
void nv21_bgr_basic(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch) {
nv2bgr(in_data, out_data, srcw, srch, 0, 1);
}
void nv12_bgra_basic(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch) {
nv2bgra(in_data, out_data, srcw, srch, 1, 0);
}
void nv21_bgra_basic(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch) {
nv2bgra(in_data, out_data, srcw, srch, 0, 1);
}
/*
<<<<<<< HEAD
/*
=======
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
采用CV_BGR2GRAY,转换公式Gray = 0.1140*B + 0.5870*G + 0.2989*R
采用CV_RGB2GRAY,转换公式Gray = 0.1140*R + 0.5870*G + 0.2989*B
b = 0.114 *128 = 14.529 = 15
g = 0.587 * 128 = 75.136 = 75
r = 0.2989 * 128 = 38.2592 = 38
Gray = (15*B + 75*G + 38*R)/128
bgr2gray, rgb2gray
*/
void bgr_gray_basic(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch) {
for (int i = 0; i < srch; i++) {
const uint8_t* din_ptr = in_data + i * 3 * srcw;
uint8_t* dout_ptr = out_data + i * srcw;
for (int j = 0; j < srcw; j++) {
int sum = din_ptr[0] * 15 + din_ptr[1] * 75 + din_ptr[2] * 38;
sum = sum >> 7;
*dout_ptr++ = sum;
din_ptr += 3;
}
}
}
<<<<<<< HEAD
=======
void bgra_gray_basic(const uint8_t* in_data,
uint8_t* out_data,
int srcw,
int srch) {
for (int i = 0; i < srch; i++) {
const uint8_t* din_ptr = in_data + i * 4 * srcw;
uint8_t* dout_ptr = out_data + i * srcw;
for (int j = 0; j < srcw; j++) {
int sum = din_ptr[0] * 15 + din_ptr[1] * 75 + din_ptr[2] * 38;
sum = sum >> 7;
*dout_ptr++ = sum;
din_ptr += 4;
}
}
}
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
void gray_bgr_basic(const uint8_t* src, uint8_t* dst, int srcw, int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = *src;
*dst++ = *src;
*dst++ = *src;
src++;
}
}
}
<<<<<<< HEAD
=======
void gray_bgra_basic(const uint8_t* src, uint8_t* dst, int srcw, int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = *src;
*dst++ = *src;
*dst++ = *src;
*dst++ = 255;
src++;
}
}
}
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
// bgr2bgra, rgb2rgba
void hwc3_to_hwc4_basic(const uint8_t* src, uint8_t* dst, int srcw, int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
*dst++ = 255;
}
}
}
// bgra2bgr, rgba2rgb
void hwc4_to_hwc3_basic(const uint8_t* src, uint8_t* dst, int srcw, int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
src++;
}
}
}
// bgr2rgb, rgb2bgr
void hwc3_trans_basic(const uint8_t* src, uint8_t* dst, int srcw, int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = src[2]; // r
*dst++ = src[1]; // g
*dst++ = src[0]; // b
src += 3;
}
}
}
// bgra2rgba, rgba2bgra
void hwc4_trans_basic(const uint8_t* src, uint8_t* dst, int srcw, int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = src[2]; // r
*dst++ = src[1]; // g
*dst++ = src[0]; // b
*dst++ = src[3]; // a
src += 4;
}
}
}
// bgra2rgb, rgba2bgr
void hwc4_trans_hwc3_basic(const uint8_t* src,
uint8_t* dst,
int srcw,
int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = src[2]; // r
*dst++ = src[1]; // g
*dst++ = src[0]; // b
// *dst++ = src[4];//a
src += 4;
}
}
}
// bgr2rgba, rgb2bga
void hwc3_trans_hwc4_basic(const uint8_t* src,
uint8_t* dst,
int srcw,
int srch) {
for (int i = 0; i < srch; i++) {
for (int j = 0; j < srcw; j++) {
*dst++ = src[2]; // r
*dst++ = src[1]; // g
*dst++ = src[0]; // b
*dst++ = 255; // a
src += 3;
}
}
}
void image_convert_basic(const uint8_t* in_data,
uint8_t* out_data,
ImageFormat srcFormat,
ImageFormat dstFormat,
int srcw,
int srch,
int out_size) {
if (srcFormat == dstFormat) {
// copy
memcpy(out_data, in_data, sizeof(uint8_t) * out_size);
return;
} else {
if (srcFormat == ImageFormat::NV12 &&
(dstFormat == ImageFormat::BGR || dstFormat == ImageFormat::RGB)) {
nv12_bgr_basic(in_data, out_data, srcw, srch);
} else if (srcFormat == ImageFormat::NV21 &&
(dstFormat == ImageFormat::BGR ||
dstFormat == ImageFormat::RGB)) {
nv21_bgr_basic(in_data, out_data, srcw, srch);
} else if (srcFormat == ImageFormat::NV12 &&
(dstFormat == ImageFormat::BGRA ||
dstFormat == ImageFormat::RGBA)) {
nv12_bgra_basic(in_data, out_data, srcw, srch);
} else if (srcFormat == ImageFormat::NV21 &&
(dstFormat == ImageFormat::BGRA ||
dstFormat == ImageFormat::RGBA)) {
nv21_bgra_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGB &&
dstFormat == ImageFormat::GRAY) ||
(srcFormat == ImageFormat::BGR &&
dstFormat == ImageFormat::GRAY)) {
bgr_gray_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::GRAY &&
dstFormat == ImageFormat::RGB) ||
(srcFormat == ImageFormat::GRAY &&
dstFormat == ImageFormat::BGR)) {
gray_bgr_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGBA &&
<<<<<<< HEAD
=======
dstFormat == ImageFormat::GRAY) ||
(srcFormat == ImageFormat::BGRA &&
dstFormat == ImageFormat::GRAY)) {
bgra_gray_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::GRAY &&
dstFormat == ImageFormat::RGBA) ||
(srcFormat == ImageFormat::GRAY &&
dstFormat == ImageFormat::BGRA)) {
gray_bgra_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGBA &&
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
dstFormat == ImageFormat::RGB) ||
(srcFormat == ImageFormat::BGRA &&
dstFormat == ImageFormat::BGR)) {
hwc4_to_hwc3_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGB &&
dstFormat == ImageFormat::RGBA) ||
(srcFormat == ImageFormat::BGR &&
dstFormat == ImageFormat::BGRA)) {
hwc3_to_hwc4_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGB &&
dstFormat == ImageFormat::BGR) ||
(srcFormat == ImageFormat::BGR &&
dstFormat == ImageFormat::RGB)) {
hwc3_trans_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGBA &&
dstFormat == ImageFormat::BGRA) ||
(srcFormat == ImageFormat::BGRA &&
dstFormat == ImageFormat::RGBA)) {
hwc4_trans_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGBA &&
dstFormat == ImageFormat::BGR) ||
(srcFormat == ImageFormat::BGRA &&
dstFormat == ImageFormat::RGB)) {
hwc4_trans_hwc3_basic(in_data, out_data, srcw, srch);
} else if ((srcFormat == ImageFormat::RGB &&
dstFormat == ImageFormat::BGRA) ||
(srcFormat == ImageFormat::BGR &&
dstFormat == ImageFormat::RGBA)) {
hwc3_trans_hwc4_basic(in_data, out_data, srcw, srch);
} else {
printf("srcFormat: %d, dstFormat: %d does not support! \n",
srcFormat,
dstFormat);
}
// for (int i = 0; i < out_size; i++){
// printf("%d ", *out_data++);
// if ((i+1) % 10 == 0){
// printf("\n");
// }
// }
}
}
void compute_xy(int srcw,
int srch,
int dstw,
int dsth,
double scale_x,
double scale_y,
int* xofs,
int* yofs,
float* ialpha,
float* ibeta) {
float fy = 0.f;
float fx = 0.f;
int sy = 0;
int sx = 0;
const int resize_coef_bits = 11;
const int resize_coef_scale = 1 << resize_coef_bits;
for (int dx = 0; dx < dstw; dx++) {
fx = static_cast<float>((dx + 0.5) * scale_x - 0.5);
sx = floor(fx);
fx -= sx;
if (sx < 0) {
sx = 0;
fx = 0.f;
}
if (sx >= srcw - 1) {
sx = srcw - 2;
fx = 1.f;
}
xofs[dx] = sx;
float a0 = (1.f - fx);
float a1 = fx;
ialpha[dx * 2] = a0;
ialpha[dx * 2 + 1] = a1;
}
for (int dy = 0; dy < dsth; dy++) {
fy = static_cast<float>((dy + 0.5) * scale_y - 0.5);
sy = floor(fy);
fy -= sy;
if (sy < 0) {
sy = 0;
fy = 0.f;
}
if (sy >= srch - 1) {
sy = srch - 2;
fy = 1.f;
}
yofs[dy] = sy;
float b0 = (1.f - fy);
float b1 = fy;
ibeta[dy * 2] = b0;
ibeta[dy * 2 + 1] = b1;
}
}
void image_resize_basic(const uint8_t* in_data,
uint8_t* out_data,
ImageFormat srcFormat,
int srcw,
int srch,
int dstw,
int dsth) {
int size = srcw * srch;
if (srcw == dstw && srch == dsth) {
if (srcFormat == ImageFormat::NV12 || srcFormat == ImageFormat::NV21) {
<<<<<<< HEAD
size = srcw * (ceil(1.5 * srch));
=======
size = srcw * (static_cast<int>(1.5 * srch));
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
} else if (srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB) {
size = 3 * srcw * srch;
} else if (srcFormat == ImageFormat::BGRA ||
srcFormat == ImageFormat::RGBA) {
size = 4 * srcw * srch;
}
memcpy(out_data, in_data, sizeof(uint8_t) * size);
return;
}
<<<<<<< HEAD
double scale_x = static_cast<double>(srcw / dstw);
double scale_y = static_cast<double>(srch / dsth);
=======
double scale_x = static_cast<double>(srcw) / dstw;
double scale_y = static_cast<double>(srch) / dsth;
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
int* buf = new int[dstw + dsth];
int* xofs = buf;
int* yofs = buf + dstw;
float* ialpha = new float[dstw * 2];
<<<<<<< HEAD
float* ibeta = new float[dsth * 2];
=======
float* ibeta = new float[dsth * 3];
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
int w_in = srcw;
int w_out = dstw;
int num = 1;
int orih = dsth;
<<<<<<< HEAD
compute_xy(
srcw, srch, dstw, dsth, scale_x, scale_y, xofs, yofs, ialpha, ibeta);
=======
compute_xy(
srcw, srch, dstw, dsth, scale_x, scale_y, xofs, yofs, ialpha, ibeta);
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
if (srcFormat == ImageFormat::GRAY) {
num = 1;
} else if (srcFormat == ImageFormat::NV12 || srcFormat == ImageFormat::NV21) {
int hout = static_cast<int>(0.5 * dsth);
// uv todo
w_out = dstw;
num = 1;
dsth += hout;
} else if (srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB) {
w_in = srcw * 3;
w_out = dstw * 3;
num = 3;
} else if (srcFormat == ImageFormat::BGRA || srcFormat == ImageFormat::RGBA) {
w_in = srcw * 4;
w_out = dstw * 4;
num = 4;
}
float* ialpha1 = nullptr;
int* xofs1 = nullptr;
int* yofs1 = nullptr;
if (orih < dsth) {
int tmp = dsth - orih;
<<<<<<< HEAD
float* ialpha1 = new float[dstw];
int* xofs1 = new int[srcw];
int* yofs1 = new int[tmp];
compute_xy(srcw / 2,
=======
ialpha1 = new float[dstw];
xofs1 = new int[dstw];
yofs1 = new int[tmp];
compute_xy(srcw,
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
srch / 2,
dstw / 2,
tmp,
scale_x,
scale_y,
xofs1,
yofs1,
ialpha1,
<<<<<<< HEAD
ibeta + dsth);
=======
ibeta + orih * 2);
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
}
#pragma omp parallel for
for (int dy = 0; dy < dsth; dy++) {
uint8_t* out_ptr = out_data + dy * w_out;
int y_in_start = yofs[dy];
<<<<<<< HEAD
int y_in_end = y_in_start + 1;
int y_flag = 0; // only one line
if (y_in_start < 0) {
y_flag = 1;
}
=======
int y_flag = 0;
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
float b0 = ibeta[dy * 2];
float b1 = ibeta[dy * 2 + 1];
if (dy >= orih) {
num = 2; // uv
ialpha = ialpha1;
xofs = xofs1;
yofs = yofs1;
<<<<<<< HEAD
=======
y_in_start = yofs[dy - orih] + srch;
}
int y_in_end = y_in_start + 1;
if (y_in_start < 0) {
y_flag = 1;
y_in_end = 0;
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
}
for (int dx = 0; dx < w_out; dx += num) {
int tmp = dx / num;
int x_in_start = xofs[tmp] * num; // 0
int x_in_end = x_in_start + num; // 2
int x_flag = 0;
if (x_in_start < 0) {
x_flag = 1;
x_in_end = 0;
}
<<<<<<< HEAD
// printf("x_in: %d, y_in: %d \n", x_in_start, y_in_start);
=======
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
float a0 = ialpha[tmp * 2];
float a1 = ialpha[tmp * 2 + 1];
int tl_index = y_in_start * w_in + x_in_start; // 0
int tr_index = y_in_start * w_in + x_in_end; // 2
int bl_index = y_in_end * w_in + x_in_start;
int br_index = y_in_end * w_in + x_in_end;
int ind = dx;
for (int i = 0; i < num; i++) {
int tl = in_data[tl_index];
int tr = in_data[tr_index];
int bl = in_data[bl_index];
int br = in_data[br_index];
if (y_flag == 1) {
tl = 0;
tr = 0;
}
if (x_flag == 1) {
tl = 0;
bl = 0;
}
tl_index++;
tr_index++;
bl_index++;
br_index++;
float outval = (tl * a0 + tr * a1) * b0 + (bl * a0 + br * a1) * b1;
<<<<<<< HEAD
// printf("tl: %d, tr: %d, bl: %d, br: %d \n", tl, tr, bl, br);
// printf("br_index: %d, a0: %f, b1: %f, out: %f \n", br_index, a0, b1,
// outval);
=======
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
out_ptr[ind++] = ceil(outval);
}
}
}
}
void rotate90_basic(const uint8_t* in_data,
int h_in,
int w_in,
uint8_t* out_data,
int h_out,
int w_out,
int num) {
int win = w_in * num;
int wout = w_out * num;
for (int x = 0; x < h_in; x++) {
for (int y = 0; y < w_in; y++) {
int tmpy = y * num;
int tmpx = (w_out - 1 - x) * num; // x
for (int i = 0; i < num; i++) {
out_data[y * wout + tmpx] = in_data[x * win + tmpy];
tmpx++;
tmpy++;
}
}
}
}
void rotate180_basic(const uint8_t* in_data,
int h_in,
int w_in,
uint8_t* out_data,
int h_out,
int w_out,
int num) {
int win = w_in * num;
int h = h_in - 1;
int w = win - 1;
for (int x = 0; x < h_in; x++) {
for (int y = 0; y < w_in; y++) {
int tmpy = y * num;
int tmp = tmpy + (num - 1);
for (int i = 0; i < num; i++) {
out_data[(h - x) * win + w - tmp] = in_data[x * win + tmpy];
tmpy++;
tmp--;
}
}
}
}
void rotate270_basic(const uint8_t* in_data,
int h_in,
int w_in,
uint8_t* out_data,
int h_out,
int w_out,
int num) {
int win = w_in * num;
int wout = w_out * num;
int h = h_out - 1;
for (int x = 0; x < h_in; x++) {
for (int y = 0; y < w_in; y++) {
int tmpy = y * num;
int tmpx = x * num;
for (int i = 0; i < num; i++) {
out_data[(h - y) * wout + tmpx] =
in_data[x * win + tmpy]; // (y,x) = in(x,y)
tmpx++;
tmpy++;
}
}
}
}
void image_rotate_basic(const uint8_t* in_data,
uint8_t* out_data,
ImageFormat srcFormat,
int srcw,
int srch,
float rotate) {
int num = 1;
if (srcFormat == ImageFormat::GRAY) {
num = 1;
} else if (srcFormat == ImageFormat::NV12 || srcFormat == ImageFormat::NV21) {
num = 1; // todo
return;
} else if (srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB) {
num = 3;
} else if (srcFormat == ImageFormat::BGRA || srcFormat == ImageFormat::RGBA) {
num = 4;
}
if (rotate == 90) {
rotate90_basic(in_data, srch, srcw, out_data, srcw, srch, num);
} else if (rotate == 180) {
rotate180_basic(in_data, srch, srcw, out_data, srch, srcw, num);
} else if (rotate == 270) {
rotate270_basic(in_data, srch, srcw, out_data, srcw, srch, num);
}
}
void flipx_basic(
const uint8_t* in_data, int h_in, int w_in, uint8_t* out_data, int num) {
int h = h_in - 1;
int w = w_in * num;
for (int x = 0; x < h_in; x++) {
for (int y = 0; y < w_in; y++) {
int tmpy = y * num;
for (int i = 0; i < num; i++) {
out_data[(h - x) * w + tmpy] =
in_data[x * w + tmpy]; // (y,x) = in(x,y)
tmpy++;
}
}
}
}
void flipy_basic(
const uint8_t* in_data, int h_in, int w_in, uint8_t* out_data, int num) {
int w = w_in * num - 1;
for (int x = 0; x < h_in; x++) {
for (int y = 0; y < w_in; y++) {
int tmpy = y * num;
int tmp = tmpy + (num - 1);
for (int i = 0; i < num; i++) {
out_data[x * w_in * num + w - tmp] =
in_data[x * w_in * num + tmpy]; // (y,x) = in(x,y)
tmpy++;
tmp--;
}
}
}
}
void flipxy_basic(
const uint8_t* in_data, int h_in, int w_in, uint8_t* out_data, int num) {
int win = w_in * num;
int h = h_in - 1;
int w = win - 1;
for (int x = 0; x < h_in; x++) {
for (int y = 0; y < w_in; y++) {
int tmpy = y * num;
int tmp = tmpy + (num - 1);
for (int i = 0; i < num; i++) {
out_data[(h - x) * win + w - tmp] =
in_data[x * win + tmpy]; // (h-y,w-x) = in(x,y)
tmpy++;
tmp--;
}
}
}
}
void image_flip_basic(const uint8_t* in_data,
uint8_t* out_data,
ImageFormat srcFormat,
int srcw,
int srch,
FlipParam flip) {
int num = 1;
if (srcFormat == ImageFormat::GRAY) {
num = 1;
} else if (srcFormat == ImageFormat::NV12 || srcFormat == ImageFormat::NV21) {
num = 1; // todo
return;
} else if (srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB) {
num = 3;
} else if (srcFormat == ImageFormat::BGRA || srcFormat == ImageFormat::RGBA) {
num = 4;
}
// printf("image_flip_basic: %d \n", flip);
if (flip == FlipParam::X) {
flipx_basic(in_data, srch, srcw, out_data, num);
} else if (flip == FlipParam::Y) {
flipy_basic(in_data, srch, srcw, out_data, num);
} else if (flip == FlipParam::XY) {
flipxy_basic(in_data, srch, srcw, out_data, num);
}
}
<<<<<<< HEAD
=======
void gray_to_tensor_basic(const uint8_t* bgr,
float* output,
int width,
int height,
float* means,
float* scales,
int num) {
int size = width * height;
float mean_val = means[0];
float scale_val = scales[0];
for (int h = 0; h < height; h++) {
const uint8_t* ptr_bgr = bgr + h * width * num;
float* ptr_h = output + h * width;
for (int i = 0; i < width; i++) {
*ptr_h++ = (ptr_bgr[0] - mean_val) * scale_val;
ptr_bgr += num;
}
}
}
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
void bgr_to_tensor_chw_basic(const uint8_t* bgr,
float* output,
int width,
int height,
float* means,
float* scales,
int num) {
int size = width * height;
float r_means = means[0];
float g_means = means[1];
float b_means = means[2];
float r_scales = scales[0];
float g_scales = scales[1];
float b_scales = scales[2];
for (int h = 0; h < height; h++) {
const uint8_t* ptr_bgr = bgr + h * width * num;
float* ptr_b = output + h * width;
float* ptr_g = ptr_b + size;
float* ptr_r = ptr_g + size;
for (int i = 0; i < width; i++) {
*ptr_b++ = (ptr_bgr[0] - b_means) * b_scales;
*ptr_g++ = (ptr_bgr[1] - g_means) * g_scales;
*ptr_r++ = (ptr_bgr[2] - r_means) * r_scales;
ptr_bgr += num;
}
}
}
void bgr_to_tensor_hwc_basic(const uint8_t* bgr,
float* output,
int width,
int height,
float* means,
float* scales,
int num) {
int size = width * height;
float r_means = means[0];
float g_means = means[1];
float b_means = means[2];
float r_scales = scales[0];
float g_scales = scales[1];
float b_scales = scales[2];
for (int h = 0; h < height; h++) {
const uint8_t* ptr_bgr = bgr + h * width * num;
float* out_bgr = output + h * width * num;
for (int i = 0; i < width; i++) {
*out_bgr++ = (ptr_bgr[0] - b_means) * b_scales;
*out_bgr++ = (ptr_bgr[1] - g_means) * g_scales;
*out_bgr++ = (ptr_bgr[2] - r_means) * r_scales;
ptr_bgr += num;
}
}
}
void image_to_tensor_basic(const uint8_t* in_data,
Tensor* dst,
ImageFormat srcFormat,
<<<<<<< HEAD
LayOut layout,
=======
LayoutType layout,
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
int srcw,
int srch,
float* means,
float* scales) {
float* output = dst->mutable_data<float>();
<<<<<<< HEAD
if (layout == LayOut::CHW &&
(srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB)) {
bgr_to_tensor_chw_basic(in_data, output, srcw, srch, means, scales, 3);
} else if (layout == LayOut::HWC &&
(srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB)) {
bgr_to_tensor_hwc_basic(in_data, output, srcw, srch, means, scales, 3);
} else if (layout == LayOut::CHW && (srcFormat == ImageFormat::BGRA ||
srcFormat == ImageFormat::RGBA)) {
bgr_to_tensor_chw_basic(in_data, output, srcw, srch, means, scales, 4);
} else if (layout == LayOut::HWC && (srcFormat == ImageFormat::BGRA ||
srcFormat == ImageFormat::RGBA)) {
bgr_to_tensor_hwc_basic(in_data, output, srcw, srch, means, scales, 4);
=======
if (layout == LayoutType::kNCHW &&
(srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB)) {
bgr_to_tensor_chw_basic(in_data, output, srcw, srch, means, scales, 3);
} else if (layout == LayoutType::kNHWC &&
(srcFormat == ImageFormat::BGR || srcFormat == ImageFormat::RGB)) {
bgr_to_tensor_hwc_basic(in_data, output, srcw, srch, means, scales, 3);
} else if (layout == LayoutType::kNCHW && (srcFormat == ImageFormat::BGRA ||
srcFormat == ImageFormat::RGBA)) {
bgr_to_tensor_chw_basic(in_data, output, srcw, srch, means, scales, 4);
} else if (layout == LayoutType::kNHWC && (srcFormat == ImageFormat::BGRA ||
srcFormat == ImageFormat::RGBA)) {
bgr_to_tensor_hwc_basic(in_data, output, srcw, srch, means, scales, 4);
} else if (srcFormat == ImageFormat::GRAY &&
(layout == LayoutType::kNHWC || layout == LayoutType::kNCHW)) {
gray_to_tensor_basic(in_data, output, srcw, srch, means, scales, 1);
>>>>>>> d5b08275c46b2517790d170a469006246f59b6bf
}
}
|
omp50_taskwait_depend.c | // RUN: %libomp-compile-and-run
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// clang does not yet support taskwait with depend clause
// clang-12 introduced parsing, but no codegen
// TODO: update expected result when codegen in clang is added
// icc does not yet support taskwait with depend clause
// TODO: update expected result when support for icc is added
// XFAIL: clang, icc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "omp_my_sleep.h"
int a = 0, b = 0;
int task_grabbed = 0, task_can_proceed = 0;
int task2_grabbed = 0, task2_can_proceed = 0;
static void wait_on_flag(int *flag) {
int flag_value;
int timelimit = 30;
int secs = 0;
do {
#pragma omp atomic read
flag_value = *flag;
my_sleep(1.0);
secs++;
if (secs == timelimit) {
fprintf(stderr, "error: timeout in wait_on_flag()\n");
exit(EXIT_FAILURE);
}
} while (flag_value == 0);
}
static void signal_flag(int *flag) {
#pragma omp atomic
(*flag)++;
}
int main(int argc, char** argv) {
// Ensure two threads are running
int num_threads = omp_get_max_threads();
if (num_threads < 2)
omp_set_num_threads(2);
#pragma omp parallel shared(a)
{
int a_value;
// Let us be extra safe here
if (omp_get_num_threads() > 1) {
#pragma omp single nowait
{
// Schedule independent child task that
// waits to be flagged after sebsequent taskwait depend()
#pragma omp task
{
signal_flag(&task_grabbed);
wait_on_flag(&task_can_proceed);
}
// Let another worker thread grab the task to execute
wait_on_flag(&task_grabbed);
// This should be ignored since the task above has
// no dependency information
#pragma omp taskwait depend(inout: a)
// Signal the independent task to proceed
signal_flag(&task_can_proceed);
// Schedule child task with dependencies that taskwait does
// not care about
#pragma omp task depend(inout: b)
{
signal_flag(&task2_grabbed);
wait_on_flag(&task2_can_proceed);
#pragma omp atomic
b++;
}
// Let another worker thread grab the task to execute
wait_on_flag(&task2_grabbed);
// This should be ignored since the task above has
// dependency information on b instead of a
#pragma omp taskwait depend(inout: a)
// Signal the task to proceed
signal_flag(&task2_can_proceed);
// Generate one child task for taskwait
#pragma omp task shared(a) depend(inout: a)
{
my_sleep(1.0);
#pragma omp atomic
a++;
}
#pragma omp taskwait depend(inout: a)
#pragma omp atomic read
a_value = a;
if (a_value != 1) {
fprintf(stderr, "error: dependent task was not executed before "
"taskwait finished\n");
exit(EXIT_FAILURE);
}
} // #pragma omp single
} // if (num_threads > 1)
} // #pragma omp parallel
return EXIT_SUCCESS;
}
|
omp_sample1.c | #include<stdio.h>
#include<omp.h>
int main(){
int a[1000];
int b[1000];
int c[1000];
int i;
// #pragma omp parallel for
for(i=0;i<1000;i++){
a[i] = i;
b[i] = 1;
c[i] = a[i] + b[i];
}
for(i=0;i<1000;i++){
printf("%d\n",c[i]);
}
return 0;
}
|
pintegral.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/* Funcion f(x) de la cual se quiere calcular la integral */
double f(double x)
{
// return pow(10*x-14.0/3.0,3)-150*x+120;
return M_PI/2.0*sin(x*M_PI);
}
/* Calculo de la integral de la funcion f. Variante 1 */
/* Midpoint-rule approximation of the integral of f over [a,b] using n
 * subintervals.  Variant 1: the sample point is computed inline inside
 * the reduction loop. */
double calcula_integral1(double a, double b, int n)
{
  double step = (b - a) / n;
  double acc = 0;
  int i;
  #pragma omp parallel for reduction(+:acc)
  for (i = 0; i < n; i++) {
    acc += f(a + step * (i + 0.5));
  }
  return step * acc;
}
/* Calculo de la integral de la funcion f. Variante 2 */
/* Midpoint-rule approximation of the integral of f over [a,b] using n
 * subintervals.  Variant 2: the sample point is built up in a private
 * temporary, mirroring variant 1's result exactly. */
double calcula_integral2(double a, double b, int n)
{
  double step = (b - a) / n;
  double acc = 0;
  double xm;
  int i;
  #pragma omp parallel for reduction(+:acc) private(xm)
  for (i = 0; i < n; i++) {
    xm = a;
    xm += step * (i + 0.5);
    acc += f(xm);
  }
  return step * acc;
}
/*
 * Usage: pintegral <variante> [n]
 * Computes the integral of f over [0,1] with the selected variant (1 or 2)
 * and n subintervals (default 100000); prints thread info, timing, and the
 * result.  Returns nonzero on bad arguments.
 */
int main(int argc, char *argv[])
{
  double a, b, result = 0.0;  /* initialized: previously printed uninitialized on bad variante (UB) */
  int n, variante;
  printf("Número de hilos fuera de la región paralela de OpenMP = %d\n", omp_get_num_threads());
  if (argc<2) {
    fprintf(stderr, "Numero de argumentos incorrecto\n");
    return 1;
  }
  if (argc>2) n=atoi(argv[2]);
  else n=100000;
  a=0;
  b=1;
  /* Report the team size once from inside a parallel region */
  #pragma omp parallel
  {
    int id_hilo = omp_get_thread_num();
    if(id_hilo==0)
      printf("Número de hilos: %d\n", omp_get_num_threads());
    //printf("ID del hilo ejecutando región paralela = %d\n", id_hilo);
  }
  variante=atoi(argv[1]);
  double t;
  t = omp_get_wtime();
  switch (variante) {
    case 1:
      result = calcula_integral1(a,b,n);
      break;
    case 2:
      result = calcula_integral2(a,b,n);
      break;
    default:
      /* Bug fix: the original fell through and printed an indeterminate
         'result'; now it fails fast with a nonzero exit status. */
      fprintf(stderr, "Numero de variante incorrecto\n");
      return 1;
  }
  t = omp_get_wtime() - t;
  printf("El tiempo de ejecución ha sido de %.12f segundos\n", t);
  printf("Número máximo de hilos en ejecución = %d\n", omp_get_max_threads());
  printf("Valor de la integral = %.12f\n", result);
  return 0;
}
|
kmeans.c | /*
** © 2011-2016 by Kornel Lesiński.
** See COPYRIGHT file for license.
*/
#include "libimagequant.h"
#include "pam.h"
#include "kmeans.h"
#include "nearest.h"
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
/*
* K-Means iteration: new palette color is computed from weighted average of colors that map to that palette entry.
*/
/* Zero the shared accumulator array: each thread owns a region of
   (KMEANS_CACHE_LINE_GAP + map->colors) kmeans_state slots — one per palette
   color plus gap slots (presumably padding to avoid false sharing between
   threads; confirm against kmeans.h). */
LIQ_PRIVATE void kmeans_init(const colormap *map, const unsigned int max_threads, kmeans_state average_color[])
{
memset(average_color, 0, sizeof(average_color[0])*(KMEANS_CACHE_LINE_GAP+map->colors)*max_threads);
}
/* Accumulate one weighted color sample into the calling thread's slot for
   palette entry 'match'.  Each thread writes only to its own region of
   average_color, laid out as (KMEANS_CACHE_LINE_GAP + colors) slots per
   thread. */
LIQ_PRIVATE void kmeans_update_color(const f_pixel acolor, const float value, const colormap *map, unsigned int match, const unsigned int thread, kmeans_state average_color[])
{
    const unsigned int slot = match + thread * (KMEANS_CACHE_LINE_GAP + map->colors);
    kmeans_state *const s = &average_color[slot];
    s->a += acolor.a * value;
    s->r += acolor.r * value;
    s->g += acolor.g * value;
    s->b += acolor.b * value;
    s->total += value;
}
/* Merge the per-thread accumulators and write the weighted-average color back
   into each palette entry.  Entries that received no weight, and entries
   marked 'fixed', are left untouched. */
LIQ_PRIVATE void kmeans_finalize(colormap *map, const unsigned int max_threads, const kmeans_state average_color[])
{
    const unsigned int stride = KMEANS_CACHE_LINE_GAP + map->colors;
    for (unsigned int i = 0; i < map->colors; i++) {
        double sum_a = 0, sum_r = 0, sum_g = 0, sum_b = 0, weight = 0;
        // Sum this palette entry's slot from every thread's region
        for (unsigned int t = 0; t < max_threads; t++) {
            const kmeans_state *const s = &average_color[stride * t + i];
            sum_a += s->a;
            sum_r += s->r;
            sum_g += s->g;
            sum_b += s->b;
            weight += s->total;
        }
        if (weight && !map->palette[i].fixed) {
            map->palette[i].acolor = (f_pixel) {
                .a = sum_a / weight,
                .r = sum_r / weight,
                .g = sum_g / weight,
                .b = sum_b / weight,
            };
            map->palette[i].popularity = weight;
        }
    }
}
/*
 * One K-Means iteration over the histogram: re-match every histogram color to
 * its nearest palette entry, accumulate weighted color sums per entry (per
 * thread; merged by kmeans_finalize, which updates the palette in place), and
 * return the total weighted difference normalized by the histogram's total
 * perceptual weight.  'callback' may be NULL; when set it is invoked once per
 * histogram item with that item's distance to its matched entry.
 */
LIQ_PRIVATE double kmeans_do_iteration(histogram *hist, colormap *const map, kmeans_callback callback)
{
const unsigned int max_threads = omp_get_max_threads();
// NOTE(review): VLA sized by (gap + colors) * threads lives on the stack —
// assumed small enough in practice; confirm bounds on map->colors.
kmeans_state average_color[(KMEANS_CACHE_LINE_GAP+map->colors) * max_threads];
kmeans_init(map, max_threads, average_color);
struct nearest_map *const n = nearest_init(map);
hist_item *const achv = hist->achv;
const int hist_size = hist->size;
double total_diff=0;
// Parallelize only when the histogram is large enough to amortize overhead
#pragma omp parallel for if (hist_size > 2000) \
schedule(static) default(none) shared(average_color,callback) reduction(+:total_diff)
for(int j=0; j < hist_size; j++) {
float diff;
// Seed the search with the entry matched last iteration
unsigned int match = nearest_search(n, &achv[j].acolor, achv[j].tmp.likely_colormap_index, &diff);
achv[j].tmp.likely_colormap_index = match;
total_diff += diff * achv[j].perceptual_weight;
kmeans_update_color(achv[j].acolor, achv[j].perceptual_weight, map, match, omp_get_thread_num(), average_color);
if (callback) callback(&achv[j], diff);
}
nearest_free(n);
kmeans_finalize(map, max_threads, average_color);
return total_diff / hist->total_perceptual_weight;
}
|
hypre_smp_forloop.h | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.4 $
***********************************************************************EHEADER*/
/*****************************************************************************
* Wrapper code for SMP compiler directives. Translates
* hypre SMP directives into the appropriate Open MP,
* IBM, SGI, or pgcc (Red) SMP compiler directives.
****************************************************************************/
/* OpenMP: emit "parallel for" with the private-variable list supplied by the
   including file via HYPRE_SMP_PRIVATE; add a reduction clause only when
   HYPRE_SMP_REDUCTION_OP is defined.  This header is textually included
   immediately before the loop it decorates. */
#ifdef HYPRE_USING_OPENMP
#ifndef HYPRE_SMP_REDUCTION_OP
#pragma omp parallel for private(HYPRE_SMP_PRIVATE) \
schedule(static)
#endif
#ifdef HYPRE_SMP_REDUCTION_OP
#pragma omp parallel for private(HYPRE_SMP_PRIVATE) \
reduction(HYPRE_SMP_REDUCTION_OP: HYPRE_SMP_REDUCTION_VARS) \
schedule(static)
#endif
#endif
/* SGI MIPSpro directives: guided self-scheduling with chunk size 10 */
#ifdef HYPRE_USING_SGI_SMP
#pragma parallel
#pragma pfor
#pragma schedtype(gss)
#pragma chunksize(10)
#endif
/* IBM xlc directives */
#ifdef HYPRE_USING_IBM_SMP
#pragma parallel_loop
#pragma schedule (guided,10)
#endif
/* pgcc directives; note the reduction variant is intentionally empty here */
#ifdef HYPRE_USING_PGCC_SMP
#ifndef HYPRE_SMP_REDUCTION_OP
#pragma parallel local(HYPRE_SMP_PRIVATE) pfor
#endif
#ifdef HYPRE_SMP_REDUCTION_OP
#endif
#endif
/* Consume the per-use parameter macros so the next include starts clean */
#undef HYPRE_SMP_PRIVATE
#undef HYPRE_SMP_REDUCTION_OP
#undef HYPRE_SMP_REDUCTION_VARS
|
GB_binop__first_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int64)
// A*D function (colscale): GB (_AxD__first_int64)
// D*A function (rowscale): GB (_DxB__first_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT64 || GxB_NO_FIRST_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense.  For the FIRST operator GB_BINOP reduces to
// z = x, so the shared template effectively copies A's values into C.
void GB (_Cdense_ewise3_noaccum__first_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate sparse B into dense C).  The kernel body is compiled out
// (#if 0) for this operator, so this is a stub that reports success without
// modifying C.  (Generated code — do not edit.)
GrB_Info GB (_Cdense_accumB__first_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (accumulate a scalar into dense C).  Compiled out (#if 0) for this
// operator; the function is a stub that reports success without touching C.
GrB_Info GB (_Cdense_accumb__first_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  GB_DISABLE
// compiles the kernel out when this operator/type is disabled at configure
// time, in which case the caller uses the generic implementation instead.
GrB_Info GB (_AxD__first_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.  The template writes
// the results into Cx (C's value array).
GrB_Info GB (_DxB__first_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B.  The alpha/beta scalars are
// read only when is_eWiseUnion is true (see the guard below); workspace
// declared with GB_WERK_DECLARE is released by GB_FREE_WORKSPACE.
GrB_Info GB (_AaddB__first_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked by M or !M) where C is sparse or
// hypersparse.  All real work is in the shared meta template.
GrB_Info GB (_AemultB_08__first_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B where A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for FIRST (see the macro table above), so only the
// non-flipped branch of the #if below is compiled in.
GrB_Info GB (_AemultB_02__first_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant.  For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; tasks are sliced over M's entries.
GrB_Info GB (_AemultB_04__first_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is stored as a bitmap.
GrB_Info GB (_AemultB_bitmap__first_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_binop__ne_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_int8
// A.*B function (eWiseMult): GB_AemultB__ne_int8
// A*D function (colscale): GB_AxD__ne_int8
// D*A function (rowscale): GB_DxB__ne_int8
// C+=B function (dense accum): GB_Cdense_accumB__ne_int8
// C+=b function (dense accum): GB_Cdense_accumb__ne_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_int8
// C=scalar+B GB_bind1st__ne_int8
// C=scalar+B' GB_bind1st_tran__ne_int8
// C=A+scalar GB_bind2nd__ne_int8
// C=A'+scalar GB_bind2nd_tran__ne_int8
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense; for NE the template computes
// C(i,j) = (A(i,j) != B(i,j)) per the GB_BINOP macro above.
GrB_Info GB_Cdense_ewise3_noaccum__ne_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (accumulate sparse B into dense C).  Compiled out (#if 0) for this
// operator; the function is a stub that reports success without modifying C.
GrB_Info GB_Cdense_accumB__ne_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (accumulate a scalar into dense C).  Compiled out (#if 0) for this
// operator; stub returns success.
GrB_Info GB_Cdense_accumb__ne_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; results are
// written into Cx (bool, since NE produces a boolean C).
GrB_Info GB_AxD__ne_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__ne_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B.  The slice pointer triples start NULL and
// are released by GB_FREE_ALL (defined just above) after the template runs.
GrB_Info GB_AaddB__ne_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B.  Same workspace pattern as GB_AaddB:
// slice pointers start NULL and are released by GB_FREE_ALL.
GrB_Info GB_AemultB__ne_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present per the bitmap Bb.
// Cx and Bx may be aliased; the write to Cx [p] only follows the read of
// Bx [p] for the same p, so aliasing is safe.
GrB_Info GB_bind1st__ne_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    const int8_t x = (*((const int8_t *) x_input)) ;
    const int8_t *Bx = (const int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            Cx [p] = (x != Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present per the bitmap Ab.
// Cx and Ax may be aliased; each iteration reads Ax [p] before writing
// Cx [p], so aliasing is safe.
GrB_Info GB_bind2nd__ne_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    const int8_t *Ax = (const int8_t *) Ax_input ;
    const int8_t y = (*((const int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] != y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound to
// the first argument; the per-entry work is the GB_CAST_OP macro defined just
// above this function.
GrB_Info GB_bind1st_tran__ne_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound to
// the second argument; the per-entry work is the GB_CAST_OP macro defined
// just above this function.
GrB_Info GB_bind2nd_tran__ne_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
teams_nest.c | #include <stdio.h>
#include <omp.h>
// Exercise target/teams nesting: query the team count from a bare target
// region and from an explicit teams construct, then run a distribute loop
// across the league.  No pass/fail checks yet, so the exit status is 0.
int main(void) {
    int failures = 0;

    // Target region without an explicit teams construct
    #pragma omp target
    {
        printf("Num_teams=%d\n", omp_get_num_teams());
    }

    // Target region hosting a league of teams
    #pragma omp target
    {
        #pragma omp teams
        {
            if (omp_get_team_num() == 0)
                printf("Num_teams=%d\n", omp_get_num_teams());
            #pragma omp distribute
            for (int i = 0; i < 10; i++)
                printf("team %d thread %d\n", omp_get_team_num(), omp_get_thread_num());
        }
    }
    return failures;
}
|
omp_for_schedule_auto.c | // RUN: %libomp-compile-and-run
// REQUIRES: !(abt && (clang || gcc))
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "omp_testsuite.h"
int sum1;
#pragma omp threadprivate(sum1)
// Verifies schedule(auto): each participating thread accumulates into the
// threadprivate sum1 starting from firstprivate sum0 = 12345, and the merged
// total must equal 12345 * (number of participating threads) + sum(1..LOOPCOUNT).
// Returns 1 on success, 0 on failure.
int test_omp_for_auto()
{
  int j;
  int sum;
  int sum0;
  int known_sum;
  int threadsnum;
  sum = 0;
  sum0 = 12345;
  // array which keeps track of which threads participated in the for loop
  // e.g., given 4 threads, [ 0 | 1 | 1 | 0 ] implies
  // threads 0 and 3 did not, threads 1 and 2 did
  int max_threads = omp_get_max_threads();
  int* active_threads = (int*)malloc(sizeof(int)*max_threads);
  if (active_threads == NULL) {
    // Bug fix: the malloc result was previously used unchecked (CERT MEM32-C);
    // treat allocation failure as a test failure instead of crashing.
    return 0;
  }
  for(j = 0; j < max_threads; j++)
    active_threads[j] = 0;
  #pragma omp parallel
  {
    int i;
    sum1 = 0;
    #pragma omp for firstprivate(sum0) schedule(auto)
    for (i = 1; i <= LOOPCOUNT; i++) {
      active_threads[omp_get_thread_num()] = 1;
      sum0 = sum0 + i;
      sum1 = sum0;
    }
    // Merge each thread's partial sum under mutual exclusion
    #pragma omp critical
    {
      sum = sum + sum1;
    }
  }
  // count the threads that participated (sum is stored in threadsnum)
  threadsnum=0;
  for(j = 0; j < max_threads; j++) {
    if(active_threads[j])
      threadsnum++;
  }
  free(active_threads);
  known_sum = 12345 * threadsnum + (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  return (known_sum == sum);
}
// Run the schedule(auto) test REPETITIONS times; the exit status is the
// number of failed repetitions (0 means all passed).
int main()
{
    int num_failed = 0;
    for (int rep = 0; rep < REPETITIONS; rep++) {
        if (!test_omp_for_auto()) {
            num_failed++;
        }
    }
    return num_failed;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.